diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml
index 2524d67fee..77f6cacfff 100644
--- a/.github/ISSUE_TEMPLATE/bug.yml
+++ b/.github/ISSUE_TEMPLATE/bug.yml
@@ -48,8 +48,8 @@ body:
label: Version
description: What is the version of CloudNativePG you are running?
options:
- - 1.22.1
- - 1.21.3
+ - 1.22.2
+ - 1.21.4
- trunk (main)
- older in 1.21.x
- older in 1.22.x
@@ -61,6 +61,7 @@ body:
attributes:
label: What version of Kubernetes are you using?
options:
+ - 1.29
- 1.28
- 1.27
- 1.26
diff --git a/.github/k8s_versions_scope.json b/.github/k8s_versions_scope.json
index b779c33306..50b63b69f1 100644
--- a/.github/k8s_versions_scope.json
+++ b/.github/k8s_versions_scope.json
@@ -6,5 +6,5 @@
"GKE": {"min": "1.23", "max": ""},
"OPENSHIFT": {"min": "4.11", "max": ""}
},
- "unit_test": {"min": "1.23", "max": "1.28"}
+ "unit_test": {"min": "1.23", "max": "1.29"}
}
diff --git a/.github/openshift_versions.json b/.github/openshift_versions.json
index df61ff92f7..e2b67d5b32 100644
--- a/.github/openshift_versions.json
+++ b/.github/openshift_versions.json
@@ -1,4 +1,5 @@
[
+ "4.15",
"4.14",
"4.13",
"4.12"
diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml
index 1940ad97ad..b92c73a575 100644
--- a/.github/workflows/backport.yml
+++ b/.github/workflows/backport.yml
@@ -9,7 +9,7 @@ on:
- main
env:
- GOLANG_VERSION: "1.21.x"
+ GOLANG_VERSION: "1.22.x"
jobs:
# Label the source pull request with 'backport-requested' and all supported releases label, the goal is, by default
diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml
index 4d19109263..ef429b160d 100644
--- a/.github/workflows/codeql-analysis.yml
+++ b/.github/workflows/codeql-analysis.yml
@@ -32,7 +32,7 @@ on:
# set up environment variables to be used across all the jobs
env:
- GOLANG_VERSION: "1.21.x"
+ GOLANG_VERSION: "1.22.x"
jobs:
duplicate_runs:
diff --git a/.github/workflows/continuous-delivery.yml b/.github/workflows/continuous-delivery.yml
index fbf93e2e8e..896e79e526 100644
--- a/.github/workflows/continuous-delivery.yml
+++ b/.github/workflows/continuous-delivery.yml
@@ -33,7 +33,7 @@ on:
# set up environment variables to be used across all the jobs
env:
- GOLANG_VERSION: "1.21.x"
+ GOLANG_VERSION: "1.22.x"
KUBEBUILDER_VERSION: "2.3.1"
KIND_VERSION: "v0.22.0"
ROOK_VERSION: "v1.12.0"
@@ -816,7 +816,7 @@ jobs:
steps:
-
name: Azure Login
- uses: azure/login@v1.6.1
+ uses: azure/login@v2.0.0
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
-
@@ -923,7 +923,7 @@ jobs:
password: ${{ env.REGISTRY_PASSWORD }}
-
name: Azure Login
- uses: azure/login@v1.6.1
+ uses: azure/login@v2.0.0
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
-
@@ -1150,7 +1150,7 @@ jobs:
-
name: Azure Login
if: always()
- uses: azure/login@v1.6.1
+ uses: azure/login@v2.0.0
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
-
diff --git a/.github/workflows/continuous-integration.yml b/.github/workflows/continuous-integration.yml
index 2a33c713b5..b681633bf4 100644
--- a/.github/workflows/continuous-integration.yml
+++ b/.github/workflows/continuous-integration.yml
@@ -16,8 +16,8 @@ on:
# set up environment variables to be used across all the jobs
env:
- GOLANG_VERSION: "1.21.x"
- GOLANGCI_LINT_VERSION: "v1.56"
+ GOLANG_VERSION: "1.22.x"
+ GOLANGCI_LINT_VERSION: "v1.57"
KUBEBUILDER_VERSION: "2.3.1"
OPERATOR_IMAGE_NAME: "ghcr.io/${{ github.repository }}-testing"
API_DOC_NAME: "cloudnative-pg.v1.md"
@@ -623,6 +623,7 @@ jobs:
if: |
!github.event.repository.fork &&
!github.event.pull_request.head.repo.fork
+ continue-on-error: true
with:
sarif_file: snyk.sarif
diff --git a/.github/workflows/public-cloud-k8s-versions-check.yml b/.github/workflows/public-cloud-k8s-versions-check.yml
index 089db19c04..fcdef3ba73 100644
--- a/.github/workflows/public-cloud-k8s-versions-check.yml
+++ b/.github/workflows/public-cloud-k8s-versions-check.yml
@@ -47,7 +47,7 @@ jobs:
if: github.event.inputs.limit == null || github.event.inputs.limit == 'eks'
-
name: Azure Login
- uses: azure/login@v1.6.1
+ uses: azure/login@v2.0.0
with:
creds: ${{ secrets.AZURE_CREDENTIALS }}
if: github.event.inputs.limit == null || github.event.inputs.limit == 'aks'
diff --git a/.github/workflows/release-publish.yml b/.github/workflows/release-publish.yml
index a585e1353e..30110f0fed 100644
--- a/.github/workflows/release-publish.yml
+++ b/.github/workflows/release-publish.yml
@@ -8,7 +8,7 @@ on:
- v*
env:
- GOLANG_VERSION: "1.21.x"
+ GOLANG_VERSION: "1.22.x"
CNPG_IMAGE_NAME: "ghcr.io/${{ github.repository }}"
permissions:
@@ -42,7 +42,7 @@ jobs:
/src/docs/src/${{ env.FILE }}
-
name: Release
- uses: softprops/action-gh-release@v1
+ uses: softprops/action-gh-release@v2
with:
body_path: release_notes.md
draft: false
@@ -56,6 +56,8 @@ jobs:
runs-on: ubuntu-22.04
outputs:
version: ${{ steps.build-meta.outputs.version }}
+ author_name: ${{ steps.build-meta.outputs.author_name }}
+ author_email: ${{ steps.build-meta.outputs.author_email }}
digest: ${{ steps.build.outputs.digest }}
platforms: ${{ env.PLATFORMS }}
steps:
@@ -86,6 +88,11 @@ jobs:
if [[ "$latest_release_branch" != "$current_release_branch" ]]; then
skip_krew="true"
fi
+
+ # get git user and email
+ author_name=$(git show -s --format='%an' "${commit_sha}")
+ author_email=$(git show -s --format='%ae' "${commit_sha}")
+
# use git describe to get the nearest tag and use that to build the version (e.g. 1.4.0-dev24 or 1.4.0)
commit_version=$(git describe --tags --match 'v*' "${commit_sha}"| sed -e 's/^v//; s/-g[0-9a-f]\+$//; s/-\([0-9]\+\)$/-dev\1/')
commit_short=$(git rev-parse --short "${commit_sha}")
@@ -94,6 +101,8 @@ jobs:
echo "version=${commit_version}" >> $GITHUB_OUTPUT
echo "COMMIT=${commit_short}" >> $GITHUB_ENV
echo "SKIP_KREW=${skip_krew}" >> $GITHUB_ENV
+ echo "author_name=${author_name}" >> $GITHUB_OUTPUT
+ echo "author_email=${author_email}" >> $GITHUB_OUTPUT
-
name: Import GPG key
id: import_gpg
@@ -344,3 +353,57 @@ jobs:
--
1 If you feel your Operator does not fit any of the pre-defined categories, file an issue against this repo and explain your need
2 For more information see [here](https://sdk.operatorframework.io/docs/overview/#operator-capability-level)
+
+
+ publish_bundle:
+ name: Publish OLM Bundle
+    needs: [olm-bundle, release-binaries]
+ if: |
+ (always() && !cancelled()) &&
+ needs.release-binaries.result == 'success'
+ env:
+ VERSION: ${{ needs.release-binaries.outputs.version }}
+ runs-on: ubuntu-22.04
+ steps:
+ -
+ name: Checkout artifact
+ uses: actions/checkout@v4
+ with:
+ repository: cloudnative-pg/artifacts
+ token: ${{ secrets.REPO_GHA_PAT }}
+ ref: main
+ fetch-depth: 0
+ -
+ name: Configure git user
+ run: |
+ git config user.email "${{ needs.release-binaries.outputs.author_email }}"
+ git config user.name "${{ needs.release-binaries.outputs.author_name }}"
+ -
+ name: Download the bundle
+ uses: actions/download-artifact@v4
+ with:
+ name: bundle
+ -
+ name: Copy the bundle
+ run: |
+ mkdir -p "bundles/${{ env.VERSION }}"
+ cp -R bundle/* "bundles/${{ env.VERSION }}"
+ rm -fr cloudnative-pg-catalog.yaml bundle.Dockerfile *.zip bundle/
+ -
+ name: Prepare commit message
+ env:
+ COMMIT_MESSAGE: |
+ operator cloudnative-pg (${{ env.VERSION }})
+ run: |
+ # Skip creating the commit if there are no changes
+ [ -n "$(git status -s)" ] || exit 0
+
+          git add bundles/${{ env.VERSION }}
+ git commit -m "${COMMIT_MESSAGE}"
+ -
+ name: Push commit
+ uses: ad-m/github-push-action@v0.8.0
+ with:
+ github_token: ${{ secrets.REPO_GHA_PAT }}
+ repository: cloudnative-pg/artifacts
+ branch: "main"
diff --git a/.github/workflows/require-labels.yml b/.github/workflows/require-labels.yml
index 55aa471ebc..24e84f5a6a 100644
--- a/.github/workflows/require-labels.yml
+++ b/.github/workflows/require-labels.yml
@@ -19,7 +19,7 @@ jobs:
runs-on: ubuntu-22.04
steps:
- name: Require labels
- uses: docker://agilepathway/pull-request-label-checker:v1.6.24
+ uses: docker://agilepathway/pull-request-label-checker:v1.6.27
with:
any_of: "ok to merge :ok_hand:"
none_of: "do not merge"
diff --git a/.wordlist-en-custom.txt b/.wordlist-en-custom.txt
index ced7ed7542..e38b5f0b0e 100644
--- a/.wordlist-en-custom.txt
+++ b/.wordlist-en-custom.txt
@@ -21,12 +21,14 @@ AzureCredentials
AzurePVCUpdateEnabled
Azurite
BDR
+BackupCapabilities
BackupConfiguration
BackupFrom
BackupLabelFile
BackupList
BackupMethod
BackupPhase
+BackupPluginConfiguration
BackupSnapshotElementStatus
BackupSnapshotStatus
BackupSource
@@ -125,6 +127,7 @@ EKS
EOF
EOL
EmbeddedObjectMetadata
+EnablePDB
EncryptionType
EndpointCA
EnsureOption
@@ -246,8 +249,10 @@ OnlineUpgrading
OpenSSL
OpenShift
Openshift
+OperatorCapabilities
OperatorGroup
OperatorHub
+PDB
PGAudit
PGDATA
PGDG
@@ -272,11 +277,14 @@ PgBouncerSecrets
PgBouncerSecretsVersions
PgBouncerSpec
Philippe
+PluginConfigurationList
+PluginStatus
PoLA
PodAffinity
PodAntiAffinity
PodAntiAffinityType
PodDisruptionBudget
+PodDisruptionBudgets
PodMeta
PodMonitor
PodSecurityPolicy
@@ -424,6 +432,7 @@ VolumeSnapshots
WAL
WAL's
WALBackupConfiguration
+WALCapabilities
WALs
Wadle
WalBackupConfiguration
@@ -488,6 +497,7 @@ backends
backport
backported
backporting
+backupCapabilities
backupID
backupId
backupLabelFile
@@ -657,6 +667,7 @@ ecdsa
edb
eks
enableAlterSystem
+enablePDB
enablePodAntiAffinity
enablePodMonitor
enableSuperuserAccess
@@ -893,6 +904,7 @@ openldap
openshift
operability
operativity
+operatorCapabilities
operatorframework
operatorgorup
operatorgroup
@@ -931,6 +943,8 @@ pid
pitr
plpgsql
pluggable
+pluginConfiguration
+pluginStatus
png
podAffinityTerm
podAntiAffinity
@@ -990,6 +1004,7 @@ readService
readinessProbe
readthedocs
readyInstances
+reconciler
reconciliationLoop
recoverability
recoveredCluster
@@ -1176,6 +1191,7 @@ unencrypted
unfence
unfencing
unix
+unsetting
unusablePVC
updateInterval
upgradable
@@ -1197,6 +1213,7 @@ volumeSnapshots
volumesnapshot
waitForArchive
wal
+walCapabilities
walClassName
walSegmentSize
walStorage
diff --git a/ADOPTERS.md b/ADOPTERS.md
index 2fefac5167..78bd30e53d 100644
--- a/ADOPTERS.md
+++ b/ADOPTERS.md
@@ -39,3 +39,4 @@ This list is sorted in chronological order, based on the submission date.
| [Shinkansen](https://shinkansen.finance) | @utaladriz, @afiebig | 2023-11-16 | Primary production high available PostgreSQL cluster, ISO27001 Backup and Recovery Compliance |
| [Ænix](https://aenix.io) | @kvaps | 2024-02-11 | Ænix provides consulting services for cloud providers and uses CloudNativePG in free PaaS platform [Cozystack](https://cozystack.io) for running PostgreSQL-as-a-Service. |
| [IBM](https://www.ibm.com) | @pgodowski | 2024-02-20 | IBM uses CloudNativePG as the embedded SQL database within the family of [IBM Cloud Pak](https://www.ibm.com/cloud-paks) products, running as customer-managed software on top of [OpenShift Container Platform](https://www.redhat.com/en/technologies/cloud-computing/openshift/container-platform). |
+| [Google Cloud](https://cloud.google.com/) | @mastersingh24 | 2024-03-12 | Leverage the full potential of cutting-edge PostgreSQL and CloudNativePG on [Google Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine) with EDB Community 360 PostgreSQL available in the [Google Cloud Marketplace](https://console.cloud.google.com/marketplace/product/public-edb-ppas/edb-postgresql). |
diff --git a/Makefile b/Makefile
index 458daf1a1d..d8fedd1af0 100644
--- a/Makefile
+++ b/Makefile
@@ -44,8 +44,8 @@ CONTROLLER_TOOLS_VERSION ?= v0.14.0
GORELEASER_VERSION ?= v1.24.0
SPELLCHECK_VERSION ?= 0.36.0
WOKE_VERSION ?= 0.19.0
-OPERATOR_SDK_VERSION ?= 1.33.0
-OPENSHIFT_VERSIONS ?= v4.11-v4.14
+OPERATOR_SDK_VERSION ?= 1.34.1
+OPENSHIFT_VERSIONS ?= v4.11-v4.15
ARCH ?= amd64
export CONTROLLER_IMG
@@ -140,7 +140,7 @@ olm-bundle: manifests kustomize operator-sdk ## Build the bundle for OLM install
rm -fr bundle bundle.Dockerfile ;\
sed -i -e "s/ClusterRole/Role/" "$${CONFIG_TMP_DIR}/config/rbac/role.yaml" "$${CONFIG_TMP_DIR}/config/rbac/role_binding.yaml" ;\
($(KUSTOMIZE) build "$${CONFIG_TMP_DIR}/config/olm-manifests") | \
- sed -e "s@\$${CREATED_AT}@$$(LANG=C date -Iseconds -u)@g" | \
+ sed -e "s@\$${VERSION}@${VERSION}@g" | \
$(OPERATOR_SDK) generate bundle --verbose --overwrite --manifests --metadata --package cloudnative-pg --channels stable-v1 --use-image-digests --default-channel stable-v1 --version "${VERSION}" ; \
echo -e "\n # OpenShift annotations." >> bundle/metadata/annotations.yaml ;\
echo -e " com.redhat.openshift.versions: $(OPENSHIFT_VERSIONS)" >> bundle/metadata/annotations.yaml ;\
diff --git a/api/v1/backup_types.go b/api/v1/backup_types.go
index 78da0ae98b..68c0b460de 100644
--- a/api/v1/backup_types.go
+++ b/api/v1/backup_types.go
@@ -73,6 +73,10 @@ const (
// BackupMethodBarmanObjectStore means using barman to backup the
// PostgreSQL cluster
BackupMethodBarmanObjectStore BackupMethod = "barmanObjectStore"
+
+ // BackupMethodPlugin means that this backup should be handled by
+ // a plugin
+ BackupMethodPlugin BackupMethod = "plugin"
)
// BackupSpec defines the desired state of Backup
@@ -90,13 +94,17 @@ type BackupSpec struct {
// +kubebuilder:validation:Enum=primary;prefer-standby
Target BackupTarget `json:"target,omitempty"`
- // The backup method to be used, possible options are `barmanObjectStore`
- // and `volumeSnapshot`. Defaults to: `barmanObjectStore`.
+ // The backup method to be used, possible options are `barmanObjectStore`,
+ // `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
// +optional
- // +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot
+ // +kubebuilder:validation:Enum=barmanObjectStore;volumeSnapshot;plugin
// +kubebuilder:default:=barmanObjectStore
Method BackupMethod `json:"method,omitempty"`
+ // Configuration parameters passed to the plugin managing this backup
+ // +optional
+ PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
+
// Whether the default type of backup with volume snapshots is
// online/hot (`true`, default) or offline/cold (`false`)
// Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
@@ -109,6 +117,18 @@ type BackupSpec struct {
OnlineConfiguration *OnlineConfiguration `json:"onlineConfiguration,omitempty"`
}
+// BackupPluginConfiguration contains the backup configuration used by
+// the backup plugin
+type BackupPluginConfiguration struct {
+ // Name is the name of the plugin managing this backup
+ Name string `json:"name"`
+
+ // Parameters are the configuration parameters passed to the backup
+ // plugin for this backup
+ // +optional
+ Parameters map[string]string `json:"parameters,omitempty"`
+}
+
// BackupSnapshotStatus the fields exclusive to the volumeSnapshot method backup
type BackupSnapshotStatus struct {
// The elements list, populated with the gathered volume snapshots
@@ -479,6 +499,11 @@ func (backup *Backup) GetVolumeSnapshotConfiguration(
return config
}
+// IsEmpty checks if the plugin configuration is empty or not
+func (configuration *BackupPluginConfiguration) IsEmpty() bool {
+ return configuration == nil || len(configuration.Name) == 0
+}
+
func init() {
SchemeBuilder.Register(&Backup{}, &BackupList{})
}
diff --git a/api/v1/backup_webhook.go b/api/v1/backup_webhook.go
index 37edc63e73..bf489eafdb 100644
--- a/api/v1/backup_webhook.go
+++ b/api/v1/backup_webhook.go
@@ -108,5 +108,13 @@ func (r *Backup) validate() field.ErrorList {
))
}
+ if r.Spec.Method == BackupMethodPlugin && r.Spec.PluginConfiguration.IsEmpty() {
+ result = append(result, field.Invalid(
+ field.NewPath("spec", "pluginConfiguration"),
+			r.Spec.PluginConfiguration,
+ "cannot be empty when the backup method is plugin",
+ ))
+ }
+
return result
}
diff --git a/api/v1/cluster_plugins.go b/api/v1/cluster_plugins.go
new file mode 100644
index 0000000000..70f3512cac
--- /dev/null
+++ b/api/v1/cluster_plugins.go
@@ -0,0 +1,69 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// LoadPluginClient creates a new plugin client, loading the plugins that are
+// required by this cluster
+func (cluster *Cluster) LoadPluginClient(ctx context.Context) (client.Client, error) {
+ pluginNames := make([]string, len(cluster.Spec.Plugins))
+ for i, pluginDeclaration := range cluster.Spec.Plugins {
+ pluginNames[i] = pluginDeclaration.Name
+ }
+
+ return cluster.LoadSelectedPluginsClient(ctx, pluginNames)
+}
+
+// LoadSelectedPluginsClient creates a new plugin client, loading the requested
+// plugins
+func (cluster *Cluster) LoadSelectedPluginsClient(ctx context.Context, pluginNames []string) (client.Client, error) {
+ pluginLoader := client.NewUnixSocketClient(configuration.Current.PluginSocketDir)
+
+ // Load the plugins
+ for _, name := range pluginNames {
+ if err := pluginLoader.Load(ctx, name); err != nil {
+ return nil, err
+ }
+ }
+
+ return pluginLoader, nil
+}
+
+// GetWALPluginNames gets the list of all the plugin names capable of handling
+// the WAL service
+func (cluster *Cluster) GetWALPluginNames() (result []string) {
+ result = make([]string, 0, len(cluster.Status.PluginStatus))
+ for _, entry := range cluster.Status.PluginStatus {
+ if len(entry.WALCapabilities) > 0 {
+ result = append(result, entry.Name)
+ }
+ }
+
+ return result
+}
+
+// SetInContext records the cluster in the given context
+func (cluster *Cluster) SetInContext(ctx context.Context) context.Context {
+ return context.WithValue(ctx, utils.ContextKeyCluster, cluster)
+}
diff --git a/api/v1/cluster_types.go b/api/v1/cluster_types.go
index 20b6de00e2..44f17ae7e2 100644
--- a/api/v1/cluster_types.go
+++ b/api/v1/cluster_types.go
@@ -489,8 +489,28 @@ type ClusterSpec struct {
// The tablespaces configuration
// +optional
Tablespaces []TablespaceConfiguration `json:"tablespaces,omitempty"`
+
+ // Manage the `PodDisruptionBudget` resources within the cluster. When
+ // configured as `true` (default setting), the pod disruption budgets
+ // will safeguard the primary node from being terminated. Conversely,
+ // setting it to `false` will result in the absence of any
+ // `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ // hosting the PostgreSQL cluster. This latter configuration is
+ // advisable for any PostgreSQL cluster employed for
+ // development/staging purposes.
+ // +kubebuilder:default:=true
+ // +optional
+ EnablePDB *bool `json:"enablePDB,omitempty"`
+
+ // The plugins configuration, containing
+ // any plugin to be loaded with the corresponding configuration
+ Plugins PluginConfigurationList `json:"plugins,omitempty"`
}
+// PluginConfigurationList represent a set of plugin with their
+// configuration parameters
+type PluginConfigurationList []PluginConfiguration
+
const (
// PhaseSwitchover when a cluster is changing the primary node
PhaseSwitchover = "Switchover in progress"
@@ -896,6 +916,9 @@ type ClusterStatus struct {
// Image contains the image name used by the pods
// +optional
Image string `json:"image,omitempty"`
+
+ // PluginStatus is the status of the loaded plugins
+ PluginStatus []PluginStatus `json:"pluginStatus,omitempty"`
}
// InstanceReportedState describes the last reported state of an instance during a reconciliation loop
@@ -2343,6 +2366,42 @@ type ManagedConfiguration struct {
Roles []RoleConfiguration `json:"roles,omitempty"`
}
+// PluginConfiguration specifies a plugin that need to be loaded for this
+// cluster to be reconciled
+type PluginConfiguration struct {
+ // Name is the plugin name
+ Name string `json:"name"`
+
+ // Parameters is the configuration of the plugin
+ Parameters map[string]string `json:"parameters,omitempty"`
+}
+
+// PluginStatus is the status of a loaded plugin
+type PluginStatus struct {
+ // Name is the name of the plugin
+ Name string `json:"name"`
+
+ // Version is the version of the plugin loaded by the
+ // latest reconciliation loop
+ Version string `json:"version"`
+
+ // Capabilities are the list of capabilities of the
+ // plugin
+ Capabilities []string `json:"capabilities,omitempty"`
+
+ // OperatorCapabilities are the list of capabilities of the
+ // plugin regarding the reconciler
+ OperatorCapabilities []string `json:"operatorCapabilities,omitempty"`
+
+ // WALCapabilities are the list of capabilities of the
+ // plugin regarding the WAL management
+ WALCapabilities []string `json:"walCapabilities,omitempty"`
+
+ // BackupCapabilities are the list of capabilities of the
+ // plugin regarding the Backup management
+ BackupCapabilities []string `json:"backupCapabilities,omitempty"`
+}
+
// RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
// with the additional field Ensure specifying whether to ensure the presence or
// absence of the role in the database
@@ -2899,6 +2958,15 @@ func (cluster *Cluster) GetPrimaryUpdateMethod() PrimaryUpdateMethod {
return strategy
}
+// GetEnablePDB get the cluster EnablePDB value, defaults to true
+func (cluster *Cluster) GetEnablePDB() bool {
+ if cluster.Spec.EnablePDB == nil {
+ return true
+ }
+
+ return *cluster.Spec.EnablePDB
+}
+
// IsNodeMaintenanceWindowInProgress check if the upgrade mode is active or not
func (cluster *Cluster) IsNodeMaintenanceWindowInProgress() bool {
return cluster.Spec.NodeMaintenanceWindow != nil && cluster.Spec.NodeMaintenanceWindow.InProgress
diff --git a/api/v1/cluster_webhook.go b/api/v1/cluster_webhook.go
index 26e2352668..d7255d49ae 100644
--- a/api/v1/cluster_webhook.go
+++ b/api/v1/cluster_webhook.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1
import (
+ "context"
"encoding/json"
"fmt"
"strconv"
@@ -128,6 +129,7 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) {
UserSettings: r.Spec.PostgresConfiguration.Parameters,
IsReplicaCluster: r.IsReplica(),
PreserveFixedSettingsFromUser: preserveUserSettings,
+ IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta),
}
sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
r.Spec.PostgresConfiguration.Parameters = sanitizedParameters
@@ -162,6 +164,27 @@ func (r *Cluster) setDefaults(preserveUserSettings bool) {
if len(r.Spec.Tablespaces) > 0 {
r.defaultTablespaces()
}
+
+ ctx := context.Background()
+
+ // Call the plugins to help with defaulting this cluster
+ contextLogger := log.FromContext(ctx)
+ pluginClient, err := r.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Error(err, "Error invoking plugin in the defaulting webhook, skipping")
+ return
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ var mutatedCluster Cluster
+ if err := pluginClient.MutateCluster(ctx, r, &mutatedCluster); err != nil {
+ contextLogger.Error(err, "Error invoking plugin in the defaulting webhook, skipping")
+ return
+ }
+
+ mutatedCluster.DeepCopyInto(r)
}
// defaultTablespaces adds the tablespace owner where the
@@ -286,8 +309,29 @@ var _ webhook.Validator = &Cluster{}
func (r *Cluster) ValidateCreate() (admission.Warnings, error) {
clusterLog.Info("validate create", "name", r.Name, "namespace", r.Namespace)
allErrs := r.Validate()
+
+ // Call the plugins to help validating this cluster creation
+ ctx := context.Background()
+ contextLogger := log.FromContext(ctx)
+ pluginClient, err := r.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Error(err, "Error invoking plugin in the validate/create webhook")
+ return nil, err
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ pluginValidationResult, err := pluginClient.ValidateClusterCreate(ctx, r)
+ if err != nil {
+		contextLogger.Error(err, "Error invoking plugin in the validate/create webhook")
+ return nil, err
+ }
+ allErrs = append(allErrs, pluginValidationResult...)
+ allWarnings := r.getAdmissionWarnings()
+
if len(allErrs) == 0 {
- return nil, nil
+ return allWarnings, nil
}
return nil, apierrors.NewInvalid(
@@ -334,6 +378,7 @@ func (r *Cluster) Validate() (allErrs field.ErrorList) {
r.validateManagedRoles,
r.validateManagedExtensions,
r.validateResources,
+ r.validateHibernationAnnotation,
}
for _, validate := range validations {
@@ -356,8 +401,27 @@ func (r *Cluster) ValidateUpdate(old runtime.Object) (admission.Warnings, error)
r.ValidateChanges(oldCluster)...,
)
+ // Call the plugins to help validating this cluster update
+ ctx := context.Background()
+ contextLogger := log.FromContext(ctx)
+ pluginClient, err := r.LoadPluginClient(ctx)
+ if err != nil {
+		contextLogger.Error(err, "Error invoking plugin in the validate/update webhook")
+ return nil, err
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ pluginValidationResult, err := pluginClient.ValidateClusterUpdate(ctx, oldCluster, r)
+ if err != nil {
+ contextLogger.Error(err, "Error invoking plugin in the validate/update webhook")
+ return nil, err
+ }
+ allErrs = append(allErrs, pluginValidationResult...)
+
if len(allErrs) == 0 {
- return nil, nil
+ return r.getAdmissionWarnings(), nil
}
return nil, apierrors.NewInvalid(
@@ -383,6 +447,7 @@ func (r *Cluster) ValidateChanges(old *Cluster) (allErrs field.ErrorList) {
r.validateReplicaModeChange,
r.validateUnixPermissionIdentifierChange,
r.validateReplicationSlotsChange,
+ r.validateWALLevelChange,
}
for _, validate := range validations {
allErrs = append(allErrs, validate(old)...)
@@ -1059,10 +1124,11 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
"Unsupported PostgreSQL version. Versions 11 or newer are supported"))
}
info := postgres.ConfigurationInfo{
- Settings: postgres.CnpgConfigurationSettings,
- MajorVersion: pgVersion,
- UserSettings: r.Spec.PostgresConfiguration.Parameters,
- IsReplicaCluster: r.IsReplica(),
+ Settings: postgres.CnpgConfigurationSettings,
+ MajorVersion: pgVersion,
+ UserSettings: r.Spec.PostgresConfiguration.Parameters,
+ IsReplicaCluster: r.IsReplica(),
+ IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&r.ObjectMeta),
}
sanitizedParameters := postgres.CreatePostgresqlConfiguration(info).GetConfigurationParameters()
@@ -1079,13 +1145,14 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
}
}
- walLevel := postgres.WalLevelValue(sanitizedParameters[postgres.WalLevelParameter])
- hasWalLevelRequirement := r.Spec.Instances > 1 || sanitizedParameters["archive_mode"] != "off" || r.IsReplica()
+ walLevel := postgres.WalLevelValue(sanitizedParameters[postgres.ParameterWalLevel])
+ hasWalLevelRequirement := r.Spec.Instances > 1 || sanitizedParameters[postgres.ParameterArchiveMode] != "off" ||
+ r.IsReplica()
if !walLevel.IsKnownValue() {
result = append(
result,
field.Invalid(
- field.NewPath("spec", "postgresql", "parameters", postgres.WalLevelParameter),
+ field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLevel),
walLevel,
fmt.Sprintf("unrecognized `wal_level` value - allowed values: `%s`, `%s`, `%s`",
postgres.WalLevelValueLogical,
@@ -1096,12 +1163,23 @@ func (r *Cluster) validateConfiguration() field.ErrorList {
result = append(
result,
field.Invalid(
- field.NewPath("spec", "postgresql", "parameters", postgres.WalLevelParameter),
+ field.NewPath("spec", "postgresql", "parameters", postgres.ParameterWalLevel),
walLevel,
"`wal_level` should be set at `logical` or `replica` when `archive_mode` is `on`, "+
"'.instances' field is greater than 1, or this is a replica cluster"))
}
+ if walLevel == "minimal" {
+ if value, ok := sanitizedParameters[postgres.ParameterMaxWalSenders]; !ok || value != "0" {
+ result = append(
+ result,
+ field.Invalid(
+ field.NewPath("spec", "postgresql", "parameters", "max_wal_senders"),
+ walLevel,
+ "`max_wal_senders` should be set at `0` when `wal_level` is `minimal`"))
+ }
+ }
+
if value := r.Spec.PostgresConfiguration.Parameters[sharedBuffersParameter]; value != "" {
if _, err := parsePostgresQuantityValue(value); err != nil {
result = append(
@@ -2108,6 +2186,22 @@ func (r *Cluster) validateReplicationSlotsChange(old *Cluster) field.ErrorList {
return errs
}
+func (r *Cluster) validateWALLevelChange(old *Cluster) field.ErrorList {
+ var errs field.ErrorList
+
+ newWALLevel := r.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLevel]
+ oldWALLevel := old.Spec.PostgresConfiguration.Parameters[postgres.ParameterWalLevel]
+
+ if newWALLevel == "minimal" && len(oldWALLevel) > 0 && oldWALLevel != newWALLevel {
+ errs = append(errs, field.Invalid(
+ field.NewPath("spec", "postgresql", "parameters", "wal_level"),
+ "minimal",
+ fmt.Sprintf("Change of `wal_level` to `minimal` not allowed on an existing cluster (from %s)", oldWALLevel)))
+ }
+
+ return errs
+}
+
// validateAzureCredentials checks and validates the azure credentials
func (azure *AzureCredentials) validateAzureCredentials(path *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
@@ -2321,3 +2415,39 @@ func (r *Cluster) validatePgFailoverSlots() field.ErrorList {
return result
}
+
+func (r *Cluster) getAdmissionWarnings() admission.Warnings {
+ return r.getMaintenanceWindowsAdmissionWarnings()
+}
+
+func (r *Cluster) getMaintenanceWindowsAdmissionWarnings() admission.Warnings {
+ var result admission.Warnings
+
+ if r.Spec.NodeMaintenanceWindow != nil {
+ result = append(
+ result,
+ "Consider using `.spec.enablePDB` instead of the node maintenance window feature")
+ }
+ return result
+}
+
+// validate whether the hibernation configuration is valid
+func (r *Cluster) validateHibernationAnnotation() field.ErrorList {
+ value, ok := r.Annotations[utils.HibernationAnnotationName]
+ isKnownValue := value == string(utils.HibernationAnnotationValueOn) ||
+ value == string(utils.HibernationAnnotationValueOff)
+ if !ok || isKnownValue {
+ return nil
+ }
+
+ return field.ErrorList{
+ field.Invalid(
+ field.NewPath("metadata", "annotations", utils.HibernationAnnotationName),
+ value,
+ fmt.Sprintf("Annotation value for hibernation should be %q or %q",
+ utils.HibernationAnnotationValueOn,
+ utils.HibernationAnnotationValueOff,
+ ),
+ ),
+ }
+}
diff --git a/api/v1/cluster_webhook_test.go b/api/v1/cluster_webhook_test.go
index 8742118abc..3d0ed42b5d 100644
--- a/api/v1/cluster_webhook_test.go
+++ b/api/v1/cluster_webhook_test.go
@@ -27,6 +27,7 @@ import (
"k8s.io/utils/ptr"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/versions"
. "github.com/onsi/ginkgo/v2"
@@ -1079,7 +1080,8 @@ var _ = Describe("configuration change validation", func() {
},
PostgresConfiguration: PostgresConfiguration{
Parameters: map[string]string{
- "wal_level": "minimal",
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
},
},
},
@@ -1136,7 +1138,8 @@ var _ = Describe("configuration change validation", func() {
Instances: 2,
PostgresConfiguration: PostgresConfiguration{
Parameters: map[string]string{
- "wal_level": "minimal",
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
},
},
},
@@ -1199,7 +1202,8 @@ var _ = Describe("configuration change validation", func() {
},
PostgresConfiguration: PostgresConfiguration{
Parameters: map[string]string{
- "wal_level": "minimal",
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
},
},
},
@@ -1207,6 +1211,121 @@ var _ = Describe("configuration change validation", func() {
Expect(cluster.IsReplica()).To(BeTrue())
Expect(cluster.validateConfiguration()).To(HaveLen(1))
})
+
+ It("should allow minimal wal_level with one instance and without archive mode", func() {
+ cluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
+ },
+ },
+ },
+ }
+ Expect(cluster.validateConfiguration()).To(BeEmpty())
+ })
+
+ It("should disallow minimal wal_level with one instance, without max_wal_senders being specified", func() {
+ cluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_level": "minimal",
+ },
+ },
+ },
+ }
+ Expect(cluster.validateConfiguration()).To(HaveLen(1))
+ })
+
+ It("should disallow changing wal_level to minimal for existing clusters", func() {
+ oldCluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "max_wal_senders": "0",
+ },
+ },
+ },
+ }
+ oldCluster.setDefaults(true)
+
+ cluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
+ },
+ },
+ },
+ }
+ Expect(cluster.validateWALLevelChange(&oldCluster)).To(HaveLen(1))
+ })
+
+ It("should allow retaining wal_level to minimal for existing clusters", func() {
+ oldCluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
+ },
+ },
+ },
+ }
+ oldCluster.setDefaults(true)
+
+ cluster := Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.SkipWalArchiving: "enabled",
+ },
+ },
+ Spec: ClusterSpec{
+ Instances: 1,
+ PostgresConfiguration: PostgresConfiguration{
+ Parameters: map[string]string{
+ "wal_level": "minimal",
+ "max_wal_senders": "0",
+ "shared_buffers": "512MB",
+ },
+ },
+ },
+ }
+ Expect(cluster.validateWALLevelChange(&oldCluster)).To(BeEmpty())
+ })
})
var _ = Describe("validate image name change", func() {
@@ -4093,3 +4212,38 @@ var _ = Describe("Tablespaces validation", func() {
Expect(cluster.validateTablespaceBackupSnapshot()).To(HaveLen(1))
})
})
+
+var _ = Describe("Validate hibernation", func() {
+ It("should succeed if hibernation is set to 'on'", func() {
+ cluster := &Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOn),
+ },
+ },
+ }
+ Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
+ })
+
+ It("should succeed if hibernation is set to 'off'", func() {
+ cluster := &Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.HibernationAnnotationName: string(utils.HibernationAnnotationValueOff),
+ },
+ },
+ }
+ Expect(cluster.validateHibernationAnnotation()).To(BeEmpty())
+ })
+
+ It("should fail if hibernation is set to an invalid value", func() {
+ cluster := &Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ utils.HibernationAnnotationName: "",
+ },
+ },
+ }
+ Expect(cluster.validateHibernationAnnotation()).To(HaveLen(1))
+ })
+})
diff --git a/api/v1/pooler_types.go b/api/v1/pooler_types.go
index 97eda16fb5..a935dad20e 100644
--- a/api/v1/pooler_types.go
+++ b/api/v1/pooler_types.go
@@ -36,7 +36,7 @@ const (
PoolerTypeRO = PoolerType("ro")
// DefaultPgBouncerPoolerAuthQuery is the default auth_query for PgBouncer
- DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM user_search($1)"
+ DefaultPgBouncerPoolerAuthQuery = "SELECT usename, passwd FROM public.user_search($1)"
)
// PgBouncerPoolMode is the mode of PgBouncer
@@ -135,13 +135,13 @@ type PgBouncerSpec struct {
// The credentials of the user that need to be used for the authentication
// query. In case it is specified, also an AuthQuery
- // (e.g. "SELECT usename, passwd FROM pg_shadow WHERE usename=$1")
+ // (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
// has to be specified and no automatic CNPG Cluster integration will be triggered.
// +optional
AuthQuerySecret *LocalObjectReference `json:"authQuerySecret,omitempty"`
// The query that will be used to download the hash of the password
- // of a certain user. Default: "SELECT usename, passwd FROM user_search($1)".
+ // of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
// In case it is specified, also an AuthQuerySecret has to be specified and
// no automatic CNPG Cluster integration will be triggered.
// +optional
diff --git a/api/v1/scheduledbackup_types.go b/api/v1/scheduledbackup_types.go
index f1a64f246d..c959111599 100644
--- a/api/v1/scheduledbackup_types.go
+++ b/api/v1/scheduledbackup_types.go
@@ -67,6 +67,10 @@ type ScheduledBackupSpec struct {
// +kubebuilder:default:=barmanObjectStore
Method BackupMethod `json:"method,omitempty"`
+ // Configuration parameters passed to the plugin managing this backup
+ // +optional
+ PluginConfiguration *BackupPluginConfiguration `json:"pluginConfiguration,omitempty"`
+
// Whether the default type of backup with volume snapshots is
// online/hot (`true`, default) or offline/cold (`false`)
// Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
@@ -180,6 +184,7 @@ func (scheduledBackup *ScheduledBackup) CreateBackup(name string) *Backup {
Method: scheduledBackup.Spec.Method,
Online: scheduledBackup.Spec.Online,
OnlineConfiguration: scheduledBackup.Spec.OnlineConfiguration,
+ PluginConfiguration: scheduledBackup.Spec.PluginConfiguration,
},
}
utils.InheritAnnotations(&backup.ObjectMeta, scheduledBackup.Annotations, nil, configuration.Current)
diff --git a/api/v1/zz_generated.deepcopy.go b/api/v1/zz_generated.deepcopy.go
index 3b197d5af4..0f1b093d97 100644
--- a/api/v1/zz_generated.deepcopy.go
+++ b/api/v1/zz_generated.deepcopy.go
@@ -212,6 +212,28 @@ func (in *BackupList) DeepCopyObject() runtime.Object {
return nil
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BackupPluginConfiguration) DeepCopyInto(out *BackupPluginConfiguration) {
+ *out = *in
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupPluginConfiguration.
+func (in *BackupPluginConfiguration) DeepCopy() *BackupPluginConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(BackupPluginConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BackupSnapshotElementStatus) DeepCopyInto(out *BackupSnapshotElementStatus) {
*out = *in
@@ -272,6 +294,11 @@ func (in *BackupSource) DeepCopy() *BackupSource {
func (in *BackupSpec) DeepCopyInto(out *BackupSpec) {
*out = *in
out.Cluster = in.Cluster
+ if in.PluginConfiguration != nil {
+ in, out := &in.PluginConfiguration, &out.PluginConfiguration
+ *out = new(BackupPluginConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
if in.Online != nil {
in, out := &in.Online, &out.Online
*out = new(bool)
@@ -871,6 +898,18 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
+ if in.EnablePDB != nil {
+ in, out := &in.EnablePDB, &out.EnablePDB
+ *out = new(bool)
+ **out = **in
+ }
+ if in.Plugins != nil {
+ in, out := &in.Plugins, &out.Plugins
+ *out = make(PluginConfigurationList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSpec.
@@ -980,6 +1019,13 @@ func (in *ClusterStatus) DeepCopyInto(out *ClusterStatus) {
*out = make([]string, len(*in))
copy(*out, *in)
}
+ if in.PluginStatus != nil {
+ in, out := &in.PluginStatus, &out.PluginStatus
+ *out = make([]PluginStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterStatus.
@@ -1754,6 +1800,84 @@ func (in *PgBouncerSpec) DeepCopy() *PgBouncerSpec {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginConfiguration) DeepCopyInto(out *PluginConfiguration) {
+ *out = *in
+ if in.Parameters != nil {
+ in, out := &in.Parameters, &out.Parameters
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfiguration.
+func (in *PluginConfiguration) DeepCopy() *PluginConfiguration {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginConfiguration)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in PluginConfigurationList) DeepCopyInto(out *PluginConfigurationList) {
+ {
+ in := &in
+ *out = make(PluginConfigurationList, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfigurationList.
+func (in PluginConfigurationList) DeepCopy() PluginConfigurationList {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginConfigurationList)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PluginStatus) DeepCopyInto(out *PluginStatus) {
+ *out = *in
+ if in.Capabilities != nil {
+ in, out := &in.Capabilities, &out.Capabilities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.OperatorCapabilities != nil {
+ in, out := &in.OperatorCapabilities, &out.OperatorCapabilities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.WALCapabilities != nil {
+ in, out := &in.WALCapabilities, &out.WALCapabilities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.BackupCapabilities != nil {
+ in, out := &in.BackupCapabilities, &out.BackupCapabilities
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginStatus.
+func (in *PluginStatus) DeepCopy() *PluginStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PluginStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) {
*out = *in
@@ -2299,6 +2423,11 @@ func (in *ScheduledBackupSpec) DeepCopyInto(out *ScheduledBackupSpec) {
**out = **in
}
out.Cluster = in.Cluster
+ if in.PluginConfiguration != nil {
+ in, out := &in.PluginConfiguration, &out.PluginConfiguration
+ *out = new(BackupPluginConfiguration)
+ (*in).DeepCopyInto(*out)
+ }
if in.Online != nil {
in, out := &in.Online, &out.Online
*out = new(bool)
diff --git a/cmd/kubectl-cnpg/main.go b/cmd/kubectl-cnpg/main.go
index 7f9e7d6d4a..fffb8f7f89 100644
--- a/cmd/kubectl-cnpg/main.go
+++ b/cmd/kubectl-cnpg/main.go
@@ -33,6 +33,8 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/fio"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/hibernate"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/install"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logs"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/maintenance"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/pgadmin"
@@ -55,8 +57,11 @@ func main() {
configFlags := genericclioptions.NewConfigFlags(true)
rootCmd := &cobra.Command{
- Use: "kubectl-cnpg",
- Short: "A plugin to manage your CloudNativePG clusters",
+ Use: "kubectl-cnpg",
+ Short: "A plugin to manage your CloudNativePG clusters",
+ Annotations: map[string]string{
+ cobra.CommandDisplayNameAnnotation: "kubectl cnpg",
+ },
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error {
logFlags.ConfigureLogging()
@@ -94,6 +99,8 @@ func main() {
rootCmd.AddCommand(snapshot.NewCmd())
rootCmd.AddCommand(logs.NewCmd())
rootCmd.AddCommand(pgadmin.NewCmd())
+ rootCmd.AddCommand(publication.NewCmd())
+ rootCmd.AddCommand(subscription.NewCmd())
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
diff --git a/cmd/manager/main.go b/cmd/manager/main.go
index 5a41656506..15e818ec37 100644
--- a/cmd/manager/main.go
+++ b/cmd/manager/main.go
@@ -27,6 +27,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/backup"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/bootstrap"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/controller"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/debug"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/instance"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/pgbouncer"
"github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/show"
@@ -60,6 +61,7 @@ func main() {
cmd.AddCommand(walrestore.NewCmd())
cmd.AddCommand(versions.NewCmd())
cmd.AddCommand(pgbouncer.NewCmd())
+ cmd.AddCommand(debug.NewCmd())
if err := cmd.Execute(); err != nil {
os.Exit(1)
diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml
index ad704694de..40414d9cc9 100644
--- a/config/crd/bases/postgresql.cnpg.io_backups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml
@@ -69,11 +69,12 @@ spec:
method:
default: barmanObjectStore
description: |-
- The backup method to be used, possible options are `barmanObjectStore`
- and `volumeSnapshot`. Defaults to: `barmanObjectStore`.
+ The backup method to be used, possible options are `barmanObjectStore`,
+ `volumeSnapshot` or `plugin`. Defaults to: `barmanObjectStore`.
enum:
- barmanObjectStore
- volumeSnapshot
+ - plugin
type: string
online:
description: |-
@@ -108,6 +109,23 @@ spec:
an immediate segment switch.
type: boolean
type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
target:
description: |-
The policy to decide which instance should perform this backup. If empty,
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 2ae6fc75ca..65d59f883c 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -1877,6 +1877,18 @@ spec:
description:
description: Description of this PostgreSQL cluster
type: string
+ enablePDB:
+ default: true
+ description: |-
+ Manage the `PodDisruptionBudget` resources within the cluster. When
+ configured as `true` (default setting), the pod disruption budgets
+ will safeguard the primary node from being terminated. Conversely,
+ setting it to `false` will result in the absence of any
+ `PodDisruptionBudget` resource, permitting the shutdown of all nodes
+ hosting the PostgreSQL cluster. This latter configuration is
+ advisable for any PostgreSQL cluster employed for
+ development/staging purposes.
+ type: boolean
enableSuperuserAccess:
default: false
description: |-
@@ -3210,6 +3222,27 @@ spec:
up again) or not (recreate it elsewhere - when `instances` >1)
type: boolean
type: object
+ plugins:
+ description: |-
+ The plugins configuration, containing
+ any plugin to be loaded with the corresponding configuration
+ items:
+ description: |-
+ PluginConfiguration specifies a plugin that need to be loaded for this
+ cluster to be reconciled
+ properties:
+ name:
+ description: Name is the plugin name
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: Parameters is the configuration of the plugin
+ type: object
+ required:
+ - name
+ type: object
+ type: array
postgresGID:
default: 26
description: The GID of the `postgres` user inside the image, defaults
@@ -5141,6 +5174,52 @@ spec:
phaseReason:
description: Reason for the current phase
type: string
+ pluginStatus:
+ description: PluginStatus is the status of the loaded plugins
+ items:
+ description: PluginStatus is the status of a loaded plugin
+ properties:
+ backupCapabilities:
+ description: |-
+ BackupCapabilities are the list of capabilities of the
+ plugin regarding the Backup management
+ items:
+ type: string
+ type: array
+ capabilities:
+ description: |-
+ Capabilities are the list of capabilities of the
+ plugin
+ items:
+ type: string
+ type: array
+ name:
+ description: Name is the name of the plugin
+ type: string
+ operatorCapabilities:
+ description: |-
+ OperatorCapabilities are the list of capabilities of the
+ plugin regarding the reconciler
+ items:
+ type: string
+ type: array
+ version:
+ description: |-
+ Version is the version of the plugin loaded by the
+ latest reconciliation loop
+ type: string
+ walCapabilities:
+ description: |-
+ WALCapabilities are the list of capabilities of the
+ plugin regarding the WAL management
+ items:
+ type: string
+ type: array
+ required:
+ - name
+ - version
+ type: object
+ type: array
poolerIntegrations:
description: The integration needed by poolers referencing the cluster
properties:
diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
index d41dd6d1ff..52a13be330 100644
--- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
@@ -323,7 +323,7 @@ spec:
authQuery:
description: |-
The query that will be used to download the hash of the password
- of a certain user. Default: "SELECT usename, passwd FROM user_search($1)".
+ of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
In case it is specified, also an AuthQuerySecret has to be specified and
no automatic CNPG Cluster integration will be triggered.
type: string
@@ -331,7 +331,7 @@ spec:
description: |-
The credentials of the user that need to be used for the authentication
query. In case it is specified, also an AuthQuery
- (e.g. "SELECT usename, passwd FROM pg_shadow WHERE usename=$1")
+ (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
has to be specified and no automatic CNPG Cluster integration will be triggered.
properties:
name:
diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
index 2654a631ff..c90fcc9806 100644
--- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
@@ -118,6 +118,23 @@ spec:
an immediate segment switch.
type: boolean
type: object
+ pluginConfiguration:
+ description: Configuration parameters passed to the plugin managing
+ this backup
+ properties:
+ name:
+ description: Name is the name of the plugin managing this backup
+ type: string
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Parameters are the configuration parameters passed to the backup
+ plugin for this backup
+ type: object
+ required:
+ - name
+ type: object
schedule:
description: |-
The schedule does not follow the same format used in Kubernetes CronJobs
diff --git a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
index fc7d322bf2..7853003212 100644
--- a/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
+++ b/config/olm-manifests/bases/cloudnative-pg.clusterserviceversion.yaml
@@ -8,10 +8,11 @@ metadata:
operators.operatorframework.io/project_layout: ""
description: CloudNativePG is an open source operator designed to manage highly available PostgreSQL databases with a primary/standby architecture on any supported Kubernetes cluster.
certified: "true"
- createdAt: ${CREATED_AT}
+ createdAt:
containerImage: $(OPERATOR_IMAGE_NAME)
repository: https://github.com/cloudnative-pg/cloudnative-pg
support: Community
+ olm.skipRange: '>= 1.18.0 < ${VERSION}'
labels:
operatorframework.io/arch.amd64: supported
name: cloudnative-pg.v0.0.0
diff --git a/config/olm-manifests/kustomization.yaml b/config/olm-manifests/kustomization.yaml
index b3d274f9b0..bf8467f738 100644
--- a/config/olm-manifests/kustomization.yaml
+++ b/config/olm-manifests/kustomization.yaml
@@ -3,5 +3,6 @@ resources:
- ../olm-default
- ../olm-samples
- ../olm-scorecard
+- ../olm-rbac
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
diff --git a/config/olm-rbac/kustomization.yaml b/config/olm-rbac/kustomization.yaml
new file mode 100644
index 0000000000..38eafa9ffa
--- /dev/null
+++ b/config/olm-rbac/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+- role_global.yaml
+- role_binding_global.yaml
diff --git a/config/olm-rbac/role_binding_global.yaml b/config/olm-rbac/role_binding_global.yaml
new file mode 100644
index 0000000000..fec929bed9
--- /dev/null
+++ b/config/olm-rbac/role_binding_global.yaml
@@ -0,0 +1,12 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
diff --git a/config/olm-rbac/role_global.yaml b/config/olm-rbac/role_global.yaml
new file mode 100644
index 0000000000..ca6d6488be
--- /dev/null
+++ b/config/olm-rbac/role_global.yaml
@@ -0,0 +1,22 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
diff --git a/controllers/backup_controller.go b/controllers/backup_controller.go
index 37d4472cfe..b793367d0c 100644
--- a/controllers/backup_controller.go
+++ b/controllers/backup_controller.go
@@ -126,14 +126,9 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
return ctrl.Result{}, nil
}
- if cluster.Spec.Backup == nil {
- message := fmt.Sprintf(
- "cannot proceed with the backup because cluster '%s' has no backup section defined",
- clusterName)
- contextLogger.Warning(message)
- r.Recorder.Event(&backup, "Warning", "ClusterHasNoBackupConfig", message)
- tryFlagBackupAsFailed(ctx, r.Client, &backup, errors.New(message))
- return ctrl.Result{}, nil
+ // Plugin pre-hooks
+ if hookResult := preReconcilePluginHooks(ctx, &cluster, &backup); hookResult.StopReconciliation {
+ return hookResult.Result, hookResult.Err
}
// This check is still needed for when the backup resource creation is forced through the webhook
@@ -154,6 +149,21 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
}
if backup.Spec.Method == apiv1.BackupMethodBarmanObjectStore {
+ if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
+ tryFlagBackupAsFailed(ctx, r.Client, &backup,
+ errors.New("no barmanObjectStore section defined on the target cluster"))
+ return ctrl.Result{}, nil
+ }
+
+ if isRunning {
+ return ctrl.Result{}, nil
+ }
+
+ r.Recorder.Eventf(&backup, "Normal", "Starting",
+ "Starting backup for cluster %v", cluster.Name)
+ }
+
+ if backup.Spec.Method == apiv1.BackupMethodPlugin {
if isRunning {
return ctrl.Result{}, nil
}
@@ -163,8 +173,12 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
}
origBackup := backup.DeepCopy()
+
+ // From now on, we differentiate backups managed by the instance manager (barman and plugins)
+ // from the ones managed directly by the operator (VolumeSnapshot)
+
switch backup.Spec.Method {
- case apiv1.BackupMethodBarmanObjectStore:
+ case apiv1.BackupMethodBarmanObjectStore, apiv1.BackupMethodPlugin:
// If no good running backups are found we elect a pod for the backup
pod, err := r.getBackupTargetPod(ctx, &cluster, &backup)
if apierrs.IsNotFound(err) {
@@ -195,19 +209,14 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
"cluster", cluster.Name,
"pod", pod.Name)
- if cluster.Spec.Backup.BarmanObjectStore == nil {
- tryFlagBackupAsFailed(ctx, r.Client, &backup,
- errors.New("no barmanObjectStore section defined on the target cluster"))
- return ctrl.Result{}, nil
- }
// This backup has been started
- if err := startBarmanBackup(ctx, r.Client, &backup, pod, &cluster); err != nil {
+ if err := startInstanceManagerBackup(ctx, r.Client, &backup, pod, &cluster); err != nil {
r.Recorder.Eventf(&backup, "Warning", "Error", "Backup exit with error %v", err)
tryFlagBackupAsFailed(ctx, r.Client, &backup, fmt.Errorf("encountered an error while taking the backup: %w", err))
return ctrl.Result{}, nil
}
case apiv1.BackupMethodVolumeSnapshot:
- if cluster.Spec.Backup.VolumeSnapshot == nil {
+ if cluster.Spec.Backup == nil || cluster.Spec.Backup.VolumeSnapshot == nil {
tryFlagBackupAsFailed(ctx, r.Client, &backup,
errors.New("no volumeSnapshot section defined on the target cluster"))
return ctrl.Result{}, nil
@@ -224,8 +233,11 @@ func (r *BackupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
return ctrl.Result{}, fmt.Errorf("unrecognized method: %s", backup.Spec.Method)
}
+ // plugin post hooks
contextLogger.Debug(fmt.Sprintf("object %#q has been reconciled", req.NamespacedName))
- return ctrl.Result{}, nil
+
+ hookResult := postReconcilePluginHooks(ctx, &cluster, &backup)
+ return hookResult.Result, hookResult.Err
}
func (r *BackupReconciler) isValidBackupRunning(
@@ -539,9 +551,9 @@ func (r *BackupReconciler) getBackupTargetPod(ctx context.Context,
return &pod, err
}
-// startBarmanBackup request a backup in a Pod and marks the backup started
+// startInstanceManagerBackup request a backup in a Pod and marks the backup started
// or failed if needed
-func startBarmanBackup(
+func startInstanceManagerBackup(
ctx context.Context,
client client.Client,
backup *apiv1.Backup,
@@ -550,7 +562,7 @@ func startBarmanBackup(
) error {
// This backup has been started
status := backup.GetStatus()
- status.SetAsStarted(pod, apiv1.BackupMethodBarmanObjectStore)
+ status.SetAsStarted(pod, backup.Spec.Method)
if err := postgres.PatchBackupStatusAndRetry(ctx, client, backup); err != nil {
return err
diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go
index 59b8e9c5fc..1556d4cf93 100644
--- a/controllers/cluster_controller.go
+++ b/controllers/cluster_controller.go
@@ -42,6 +42,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/reconcile"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/operatorclient"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
@@ -80,7 +81,7 @@ func NewClusterReconciler(mgr manager.Manager, discoveryClient *discovery.Discov
return &ClusterReconciler{
StatusClient: instance.NewStatusClient(),
DiscoveryClient: discoveryClient,
- Client: mgr.GetClient(),
+ Client: operatorclient.NewExtendedClient(mgr.GetClient()),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("cloudnative-pg"),
}
@@ -144,7 +145,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
return ctrl.Result{}, err
}
-
+ ctx = cluster.SetInContext(ctx)
// Run the inner reconcile loop. Translate any ErrNextLoop to an errorless return
result, err := r.reconcile(ctx, cluster)
if errors.Is(err, ErrNextLoop) {
@@ -157,7 +158,7 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
}
// Inner reconcile loop. Anything inside can require the reconciliation loop to stop by returning ErrNextLoop
-// nolint:gocognit
+// nolint:gocognit,gocyclo
func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluster) (ctrl.Result, error) {
contextLogger := log.FromContext(ctx)
@@ -195,8 +196,16 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return ctrl.Result{}, fmt.Errorf("cannot set image name: %w", err)
}
+ // Ensure we load all the plugins that are required to reconcile this cluster
+ if err := r.updatePluginsStatus(ctx, cluster); err != nil {
+ return ctrl.Result{}, fmt.Errorf("cannot reconcile required plugins: %w", err)
+ }
+
// Ensure we reconcile the orphan resources if present when we reconcile for the first time a cluster
- if err := r.reconcileRestoredCluster(ctx, cluster); err != nil {
+ if res, err := r.reconcileRestoredCluster(ctx, cluster); res != nil || err != nil {
+ if res != nil {
+ return *res, nil
+ }
return ctrl.Result{}, fmt.Errorf("cannot reconcile restored Cluster: %w", err)
}
@@ -224,6 +233,11 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return ctrl.Result{}, fmt.Errorf("cannot update the resource status: %w", err)
}
+ // Calls pre-reconcile hooks
+ if hookResult := preReconcilePluginHooks(ctx, cluster, cluster); hookResult.StopReconciliation {
+ return hookResult.Result, hookResult.Err
+ }
+
if cluster.Status.CurrentPrimary != "" &&
cluster.Status.CurrentPrimary != cluster.Status.TargetPrimary {
contextLogger.Info("There is a switchover or a failover "+
@@ -247,7 +261,31 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
return ctrl.Result{}, fmt.Errorf("cannot update the instances status on the cluster: %w", err)
}
- if err := instanceReconciler.ReconcileMetadata(ctx, r.Client, cluster, resources.instances); err != nil {
+ if err := persistentvolumeclaim.ReconcileMetadata(
+ ctx,
+ r.Client,
+ cluster,
+ resources.pvcs.Items,
+ ); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if err := instanceReconciler.ReconcileMetadata(
+ ctx,
+ r.Client,
+ cluster,
+ resources.instances.Items,
+ ); err != nil {
+ return ctrl.Result{}, err
+ }
+
+ if err := persistentvolumeclaim.ReconcileSerialAnnotation(
+ ctx,
+ r.Client,
+ cluster,
+ resources.instances.Items,
+ resources.pvcs.Items,
+ ); err != nil {
return ctrl.Result{}, err
}
@@ -339,7 +377,14 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *apiv1.Cluste
}
// Updates all the objects managed by the controller
- return r.reconcileResources(ctx, cluster, resources, instancesStatus)
+ res, err := r.reconcileResources(ctx, cluster, resources, instancesStatus)
+ if err != nil || !res.IsZero() {
+ return res, err
+ }
+
+ // Calls post-reconcile hooks
+ hookResult := postReconcilePluginHooks(ctx, cluster, cluster)
+ return hookResult.Result, hookResult.Err
}
func (r *ClusterReconciler) handleSwitchover(
diff --git a/controllers/cluster_create.go b/controllers/cluster_create.go
index 0ea395b865..697b581519 100644
--- a/controllers/cluster_create.go
+++ b/controllers/cluster_create.go
@@ -108,6 +108,10 @@ func (r *ClusterReconciler) createPostgresClusterObjects(ctx context.Context, cl
}
func (r *ClusterReconciler) reconcilePodDisruptionBudget(ctx context.Context, cluster *apiv1.Cluster) error {
+ if !cluster.GetEnablePDB() {
+ return r.deletePodDisruptionBudgetIfExists(ctx, cluster)
+ }
+
// The PDB should not be enforced if we are inside a maintenance
// window, and we chose to avoid allocating more storage space.
if cluster.IsNodeMaintenanceWindowInProgress() && cluster.IsReusePVCEnabled() {
@@ -416,6 +420,18 @@ func (r *ClusterReconciler) createOrPatchOwnedPodDisruptionBudget(
return nil
}
+func (r *ClusterReconciler) deletePodDisruptionBudgetIfExists(ctx context.Context, cluster *apiv1.Cluster) error {
+ if err := r.deletePrimaryPodDisruptionBudget(ctx, cluster); err != nil && !apierrs.IsNotFound(err) {
+ return fmt.Errorf("unable to delete primary PodDisruptionBudget: %w", err)
+ }
+
+ if err := r.deleteReplicasPodDisruptionBudget(ctx, cluster); err != nil && !apierrs.IsNotFound(err) {
+ return fmt.Errorf("unable to delete replica PodDisruptionBudget: %w", err)
+ }
+
+ return nil
+}
+
// deleteReplicasPodDisruptionBudget ensures that we delete the PDB requiring to remove one node at a time
func (r *ClusterReconciler) deletePrimaryPodDisruptionBudget(ctx context.Context, cluster *apiv1.Cluster) error {
return r.deletePodDisruptionBudget(
@@ -968,47 +984,42 @@ func (r *ClusterReconciler) createPrimaryInstance(
return ctrl.Result{}, nil
}
- // Generate a new node serial
- nodeSerial, err := r.generateNodeSerial(ctx, cluster)
- if err != nil {
- return ctrl.Result{}, fmt.Errorf("cannot generate node serial: %w", err)
- }
-
- var backup *apiv1.Backup
+ var (
+ backup *apiv1.Backup
+ recoverySnapshot *persistentvolumeclaim.StorageSource
+ )
+ // If the cluster is bootstrapping from recovery, it may do so from:
+ // 1 - a backup object, which may be done with volume snapshots or object storage
+ // 2 - volume snapshots
+ // We need to check that whichever alternative is used, the backup/snapshot is completed.
if cluster.Spec.Bootstrap != nil &&
- cluster.Spec.Bootstrap.Recovery != nil &&
- cluster.Spec.Bootstrap.Recovery.Backup != nil {
+ cluster.Spec.Bootstrap.Recovery != nil {
+ var err error
backup, err = r.getOriginBackup(ctx, cluster)
if err != nil {
return ctrl.Result{}, err
}
- if backup == nil {
- contextLogger.Info("Missing backup object, can't continue full recovery",
- "backup", cluster.Spec.Bootstrap.Recovery.Backup)
- return ctrl.Result{
- Requeue: true,
- RequeueAfter: time.Minute,
- }, nil
- }
- if backup.Status.Phase != apiv1.BackupPhaseCompleted {
- contextLogger.Info("The source backup object is not completed, can't continue full recovery",
- "backup", cluster.Spec.Bootstrap.Recovery.Backup,
- "backupPhase", backup.Status.Phase)
- return ctrl.Result{
- Requeue: true,
- RequeueAfter: time.Minute,
- }, nil
+
+ if res, err := r.checkReadyForRecovery(ctx, backup, cluster); !res.IsZero() || err != nil {
+ return res, err
}
+
+ recoverySnapshot = persistentvolumeclaim.GetCandidateStorageSourceForPrimary(cluster, backup)
}
- // Get the source storage from where to create the primary instance.
- candidateSource := persistentvolumeclaim.GetCandidateStorageSourceForPrimary(cluster, backup)
+ // Generate a new node serial
+ nodeSerial, err := r.generateNodeSerial(ctx, cluster)
+ if err != nil {
+ return ctrl.Result{}, fmt.Errorf("cannot generate node serial: %w", err)
+ }
+ // Create the PVCs from the cluster definition, and if bootstrapping from
+ // recoverySnapshot, use that as the source
if err := persistentvolumeclaim.CreateInstancePVCs(
ctx,
r.Client,
cluster,
- candidateSource,
+ recoverySnapshot,
nodeSerial,
); err != nil {
return ctrl.Result{RequeueAfter: time.Minute}, err
@@ -1017,47 +1028,27 @@ func (r *ClusterReconciler) createPrimaryInstance(
// We are bootstrapping a cluster and in need to create the first node
var job *batchv1.Job
+ isBootstrappingFromRecovery := cluster.Spec.Bootstrap != nil && cluster.Spec.Bootstrap.Recovery != nil
+ isBootstrappingFromBaseBackup := cluster.Spec.Bootstrap != nil && cluster.Spec.Bootstrap.PgBaseBackup != nil
switch {
- case cluster.Spec.Bootstrap != nil && cluster.Spec.Bootstrap.Recovery != nil:
- volumeSnapshotsRecovery := cluster.Spec.Bootstrap.Recovery.VolumeSnapshots
- if volumeSnapshotsRecovery != nil {
- status, err := persistentvolumeclaim.VerifyDataSourceCoherence(
- ctx, r.Client, cluster.Namespace, volumeSnapshotsRecovery)
- if err != nil {
- return ctrl.Result{}, err
- }
- if status.ContainsErrors() {
- contextLogger.Warning(
- "Volume snapshots verification failed, retrying",
- "status", status)
- return ctrl.Result{
- Requeue: true,
- RequeueAfter: 5 * time.Second,
- }, nil
- }
- if status.ContainsWarnings() {
- contextLogger.Warning("Volume snapshots verification warnings",
- "status", status)
- }
- }
-
- if candidateSource != nil {
- var snapshot volumesnapshot.VolumeSnapshot
- if err := r.Client.Get(ctx,
- types.NamespacedName{Name: candidateSource.DataSource.Name, Namespace: cluster.Namespace},
- &snapshot); err != nil {
- return ctrl.Result{}, err
- }
- r.Recorder.Event(cluster, "Normal", "CreatingInstance", "Primary instance (from volumeSnapshots)")
- job = specs.CreatePrimaryJobViaRestoreSnapshot(*cluster, nodeSerial, snapshot, backup)
- break
+ case isBootstrappingFromRecovery && recoverySnapshot != nil:
+ var snapshot volumesnapshot.VolumeSnapshot
+ if err := r.Client.Get(ctx,
+ types.NamespacedName{Name: recoverySnapshot.DataSource.Name, Namespace: cluster.Namespace},
+ &snapshot); err != nil {
+ return ctrl.Result{}, err
}
+ r.Recorder.Event(cluster, "Normal", "CreatingInstance", "Primary instance (from volumeSnapshots)")
+ job = specs.CreatePrimaryJobViaRestoreSnapshot(*cluster, nodeSerial, snapshot, backup)
+ case isBootstrappingFromRecovery:
r.Recorder.Event(cluster, "Normal", "CreatingInstance", "Primary instance (from backup)")
job = specs.CreatePrimaryJobViaRecovery(*cluster, nodeSerial, backup)
- case cluster.Spec.Bootstrap != nil && cluster.Spec.Bootstrap.PgBaseBackup != nil:
+
+ case isBootstrappingFromBaseBackup:
r.Recorder.Event(cluster, "Normal", "CreatingInstance", "Primary instance (from physical backup)")
job = specs.CreatePrimaryJobViaPgBaseBackup(*cluster, nodeSerial)
+
default:
r.Recorder.Event(cluster, "Normal", "CreatingInstance", "Primary instance (initdb)")
job = specs.CreatePrimaryJobViaInitdb(*cluster, nodeSerial)
@@ -1363,3 +1354,56 @@ func findInstancePodToCreate(
return nil, nil
}
+
+// checkReadyForRecovery checks if the backup or volumeSnapshots are ready, and
+// returns for requeue if not
+func (r *ClusterReconciler) checkReadyForRecovery(
+ ctx context.Context,
+ backup *apiv1.Backup,
+ cluster *apiv1.Cluster,
+) (ctrl.Result, error) {
+ contextLogger := log.FromContext(ctx)
+
+ if cluster.Spec.Bootstrap.Recovery.Backup != nil {
+ if backup == nil {
+ contextLogger.Info("Missing backup object, can't continue full recovery",
+ "backup", cluster.Spec.Bootstrap.Recovery.Backup)
+ return ctrl.Result{
+ Requeue: true,
+ RequeueAfter: time.Minute,
+ }, nil
+ }
+ if backup.Status.Phase != apiv1.BackupPhaseCompleted {
+ contextLogger.Info("The source backup object is not completed, can't continue full recovery",
+ "backup", cluster.Spec.Bootstrap.Recovery.Backup,
+ "backupPhase", backup.Status.Phase)
+ return ctrl.Result{
+ Requeue: true,
+ RequeueAfter: time.Minute,
+ }, nil
+ }
+ }
+
+ volumeSnapshotsRecovery := cluster.Spec.Bootstrap.Recovery.VolumeSnapshots
+ if volumeSnapshotsRecovery != nil {
+ status, err := persistentvolumeclaim.VerifyDataSourceCoherence(
+ ctx, r.Client, cluster.Namespace, volumeSnapshotsRecovery)
+ if err != nil {
+ return ctrl.Result{}, err
+ }
+ if status.ContainsErrors() {
+ contextLogger.Warning(
+ "Volume snapshots verification failed, retrying",
+ "status", status)
+ return ctrl.Result{
+ Requeue: true,
+ RequeueAfter: 5 * time.Second,
+ }, nil
+ }
+ if status.ContainsWarnings() {
+ contextLogger.Warning("Volume snapshots verification warnings",
+ "status", status)
+ }
+ }
+ return ctrl.Result{}, nil
+}
diff --git a/controllers/cluster_create_test.go b/controllers/cluster_create_test.go
index 60e0e284b0..8f49c63522 100644
--- a/controllers/cluster_create_test.go
+++ b/controllers/cluster_create_test.go
@@ -19,6 +19,7 @@ package controllers
import (
"context"
+ volumesnapshot "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
v1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1"
@@ -33,19 +34,20 @@ import (
"k8s.io/utils/ptr"
k8client "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
var _ = Describe("cluster_create unit tests", func() {
- It("should make sure that reconcilePostgresSecrets works correctly", func() {
- ctx := context.Background()
+ It("should make sure that reconcilePostgresSecrets works correctly", func(ctx SpecContext) {
namespace := newFakeNamespace()
cluster := newFakeCNPGCluster(namespace)
pooler := newFakePooler(cluster)
@@ -100,8 +102,7 @@ var _ = Describe("cluster_create unit tests", func() {
})
})
- It("should make sure that superUser secret is created if EnableSuperuserAccess is enabled", func() {
- ctx := context.Background()
+ It("should make sure that superUser secret is created if EnableSuperuserAccess is enabled", func(ctx SpecContext) {
namespace := newFakeNamespace()
cluster := newFakeCNPGCluster(namespace)
cluster.Spec.EnableSuperuserAccess = ptr.To(true)
@@ -125,8 +126,7 @@ var _ = Describe("cluster_create unit tests", func() {
})
})
- It("should make sure that reconcilePostgresServices works correctly", func() {
- ctx := context.Background()
+ It("should make sure that reconcilePostgresServices works correctly", func(ctx SpecContext) {
namespace := newFakeNamespace()
cluster := newFakeCNPGCluster(namespace)
@@ -142,116 +142,115 @@ var _ = Describe("cluster_create unit tests", func() {
})
})
- It("should make sure that reconcilePostgresServices works correctly if create any service is enabled", func() {
- ctx := context.Background()
- namespace := newFakeNamespace()
- cluster := newFakeCNPGCluster(namespace)
- configuration.Current.CreateAnyService = true
+ It("should make sure that reconcilePostgresServices works correctly if create any service is enabled",
+ func(ctx SpecContext) {
+ namespace := newFakeNamespace()
+ cluster := newFakeCNPGCluster(namespace)
+ configuration.Current.CreateAnyService = true
- By("executing reconcilePostgresServices", func() {
- err := clusterReconciler.reconcilePostgresServices(ctx, cluster)
- Expect(err).ToNot(HaveOccurred())
- })
+ By("executing reconcilePostgresServices", func() {
+ err := clusterReconciler.reconcilePostgresServices(ctx, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ })
- By("making sure that the services have been created", func() {
- expectResourceExistsWithDefaultClient(cluster.GetServiceAnyName(), namespace, &corev1.Service{})
- expectResourceExistsWithDefaultClient(cluster.GetServiceReadOnlyName(), namespace, &corev1.Service{})
- expectResourceExistsWithDefaultClient(cluster.GetServiceReadWriteName(), namespace, &corev1.Service{})
- expectResourceExistsWithDefaultClient(cluster.GetServiceReadName(), namespace, &corev1.Service{})
+ By("making sure that the services have been created", func() {
+ expectResourceExistsWithDefaultClient(cluster.GetServiceAnyName(), namespace, &corev1.Service{})
+ expectResourceExistsWithDefaultClient(cluster.GetServiceReadOnlyName(), namespace, &corev1.Service{})
+ expectResourceExistsWithDefaultClient(cluster.GetServiceReadWriteName(), namespace, &corev1.Service{})
+ expectResourceExistsWithDefaultClient(cluster.GetServiceReadName(), namespace, &corev1.Service{})
+ })
})
- })
-
- It("should make sure that reconcilePostgresServices can update the selectors on existing services", func() {
- ctx := context.Background()
- namespace := newFakeNamespace()
- cluster := newFakeCNPGCluster(namespace)
- configuration.Current.CreateAnyService = true
- createOutdatedService := func(svc *corev1.Service) {
- cluster.SetInheritedDataAndOwnership(&svc.ObjectMeta)
- svc.Spec.Selector = map[string]string{
- "outdated": "selector",
+ It("should make sure that reconcilePostgresServices can update the selectors on existing services",
+ func(ctx SpecContext) {
+ namespace := newFakeNamespace()
+ cluster := newFakeCNPGCluster(namespace)
+ configuration.Current.CreateAnyService = true
+
+ createOutdatedService := func(svc *corev1.Service) {
+ cluster.SetInheritedDataAndOwnership(&svc.ObjectMeta)
+ svc.Spec.Selector = map[string]string{
+ "outdated": "selector",
+ }
+ err := clusterReconciler.Client.Create(ctx, svc)
+ Expect(err).ToNot(HaveOccurred())
}
- err := clusterReconciler.Client.Create(ctx, svc)
- Expect(err).ToNot(HaveOccurred())
- }
-
- checkService := func(before *corev1.Service, expectedLabels map[string]string) {
- var afterChangesService corev1.Service
- err := clusterReconciler.Client.Get(ctx, types.NamespacedName{
- Name: before.Name,
- Namespace: before.Namespace,
- }, &afterChangesService)
- Expect(err).ToNot(HaveOccurred())
- Expect(afterChangesService.Spec.Selector).ToNot(Equal(before.Spec.Selector))
- Expect(afterChangesService.Spec.Selector).To(Equal(expectedLabels))
- Expect(afterChangesService.Labels).To(Equal(before.Labels))
- Expect(afterChangesService.Annotations).To(Equal(before.Annotations))
- }
+ checkService := func(before *corev1.Service, expectedLabels map[string]string) {
+ var afterChangesService corev1.Service
+ err := clusterReconciler.Client.Get(ctx, types.NamespacedName{
+ Name: before.Name,
+ Namespace: before.Namespace,
+ }, &afterChangesService)
+ Expect(err).ToNot(HaveOccurred())
- var readOnlyService, readWriteService, readService, anyService *corev1.Service
- By("creating the resources with outdated selectors", func() {
- By("creating any service", func() {
- svc := specs.CreateClusterAnyService(*cluster)
- createOutdatedService(svc)
- anyService = svc.DeepCopy()
- })
+ Expect(afterChangesService.Spec.Selector).ToNot(Equal(before.Spec.Selector))
+ Expect(afterChangesService.Spec.Selector).To(Equal(expectedLabels))
+ Expect(afterChangesService.Labels).To(Equal(before.Labels))
+ Expect(afterChangesService.Annotations).To(Equal(before.Annotations))
+ }
- By("creating read service", func() {
- svc := specs.CreateClusterReadService(*cluster)
- createOutdatedService(svc)
- readService = svc.DeepCopy()
+ var readOnlyService, readWriteService, readService, anyService *corev1.Service
+ By("creating the resources with outdated selectors", func() {
+ By("creating any service", func() {
+ svc := specs.CreateClusterAnyService(*cluster)
+ createOutdatedService(svc)
+ anyService = svc.DeepCopy()
+ })
+
+ By("creating read service", func() {
+ svc := specs.CreateClusterReadService(*cluster)
+ createOutdatedService(svc)
+ readService = svc.DeepCopy()
+ })
+
+ By("creating read-write service", func() {
+ svc := specs.CreateClusterReadWriteService(*cluster)
+ createOutdatedService(svc)
+ readWriteService = svc.DeepCopy()
+ })
+ By("creating read only service", func() {
+ svc := specs.CreateClusterReadOnlyService(*cluster)
+ createOutdatedService(svc)
+ readOnlyService = svc.DeepCopy()
+ })
})
- By("creating read-write service", func() {
- svc := specs.CreateClusterReadWriteService(*cluster)
- createOutdatedService(svc)
- readWriteService = svc.DeepCopy()
- })
- By("creating read only service", func() {
- svc := specs.CreateClusterReadOnlyService(*cluster)
- createOutdatedService(svc)
- readOnlyService = svc.DeepCopy()
+ By("executing reconcilePostgresServices", func() {
+ err := clusterReconciler.reconcilePostgresServices(ctx, cluster)
+ Expect(err).ToNot(HaveOccurred())
})
- })
-
- By("executing reconcilePostgresServices", func() {
- err := clusterReconciler.reconcilePostgresServices(ctx, cluster)
- Expect(err).ToNot(HaveOccurred())
- })
- By("checking any service", func() {
- checkService(anyService, map[string]string{
- "cnpg.io/podRole": "instance",
- "cnpg.io/cluster": cluster.Name,
+ By("checking any service", func() {
+ checkService(anyService, map[string]string{
+ "cnpg.io/podRole": "instance",
+ "cnpg.io/cluster": cluster.Name,
+ })
})
- })
- By("checking read-write service", func() {
- checkService(readWriteService, map[string]string{
- "cnpg.io/cluster": cluster.Name,
- "role": "primary",
+ By("checking read-write service", func() {
+ checkService(readWriteService, map[string]string{
+ "cnpg.io/cluster": cluster.Name,
+ "role": "primary",
+ })
})
- })
- By("checking read service", func() {
- checkService(readService, map[string]string{
- "cnpg.io/cluster": cluster.Name,
- "cnpg.io/podRole": "instance",
+ By("checking read service", func() {
+ checkService(readService, map[string]string{
+ "cnpg.io/cluster": cluster.Name,
+ "cnpg.io/podRole": "instance",
+ })
})
- })
- By("checking read only service", func() {
- checkService(readOnlyService, map[string]string{
- "cnpg.io/cluster": cluster.Name,
- "role": "replica",
+ By("checking read only service", func() {
+ checkService(readOnlyService, map[string]string{
+ "cnpg.io/cluster": cluster.Name,
+ "role": "replica",
+ })
})
})
- })
- It("should make sure that createOrPatchServiceAccount works correctly", func() {
- ctx := context.Background()
+ It("should make sure that createOrPatchServiceAccount works correctly", func(ctx SpecContext) {
namespace := newFakeNamespace()
cluster := newFakeCNPGCluster(namespace)
@@ -323,8 +322,7 @@ var _ = Describe("cluster_create unit tests", func() {
})
})
- It("should make sure that reconcilePodDisruptionBudget works correctly", func() {
- ctx := context.Background()
+ It("should make sure that reconcilePodDisruptionBudget works correctly", func(ctx SpecContext) {
namespace := newFakeNamespace()
cluster := newFakeCNPGCluster(namespace)
pdbReplicaName := specs.BuildReplicasPodDisruptionBudget(cluster).Name
@@ -395,6 +393,208 @@ var _ = Describe("cluster_create unit tests", func() {
})
})
+var _ = Describe("check if bootstrap recovery can proceed", func() {
+ var namespace, clusterName, name string
+
+ BeforeEach(func() {
+ namespace = newFakeNamespace()
+ clusterName = "awesomeCluster"
+ name = "foo"
+ })
+
+ _ = DescribeTable("from backup",
+ func(backup *apiv1.Backup, expectRequeue bool) {
+ cluster := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: clusterName,
+ Namespace: namespace,
+ },
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
+ Size: "1G",
+ },
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ Backup: &apiv1.BackupSource{
+ LocalObjectReference: apiv1.LocalObjectReference{
+ Name: name,
+ },
+ },
+ },
+ },
+ },
+ }
+
+ ctx := context.Background()
+ res, err := clusterReconciler.checkReadyForRecovery(ctx, backup, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ if expectRequeue {
+ Expect(res).ToNot(BeNil())
+ Expect(res).ToNot(Equal(reconcile.Result{}))
+ } else {
+ Expect(res).To(Or(BeNil(), Equal(reconcile.Result{})))
+ }
+ },
+ Entry(
+ "when bootstrapping from a completed backup",
+ &apiv1.Backup{
+ Status: apiv1.BackupStatus{
+ Phase: apiv1.BackupPhaseCompleted,
+ },
+ },
+ false),
+ Entry(
+ "when bootstrapping from an incomplete backup",
+ &apiv1.Backup{
+ Status: apiv1.BackupStatus{
+ Phase: apiv1.BackupPhaseRunning,
+ },
+ },
+ true),
+ Entry("when bootstrapping a backup that is not there",
+ nil, true),
+ )
+})
+
+var _ = Describe("check if bootstrap recovery can proceed from volume snapshot", func() {
+ var namespace, clusterName string
+ var cluster *apiv1.Cluster
+
+ BeforeEach(func() {
+ namespace = newFakeNamespace()
+ clusterName = "awesomeCluster"
+ cluster = &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: clusterName,
+ Namespace: namespace,
+ },
+ Spec: apiv1.ClusterSpec{
+ StorageConfiguration: apiv1.StorageConfiguration{
+ Size: "1G",
+ },
+ Bootstrap: &apiv1.BootstrapConfiguration{
+ Recovery: &apiv1.BootstrapRecovery{
+ VolumeSnapshots: &apiv1.DataSource{
+ Storage: corev1.TypedLocalObjectReference{
+ APIGroup: ptr.To(volumesnapshot.GroupName),
+ Kind: apiv1.VolumeSnapshotKind,
+ Name: "pgdata",
+ },
+ },
+ },
+ },
+ },
+ }
+ })
+
+ It("should not requeue if bootstrapping from a valid volume snapshot", func(ctx SpecContext) {
+ snapshots := volumesnapshot.VolumeSnapshotList{
+ Items: []volumesnapshot.VolumeSnapshot{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pgdata",
+ Namespace: namespace,
+ Labels: map[string]string{
+ utils.BackupNameLabelName: "backup-one",
+ },
+ Annotations: map[string]string{
+ utils.PvcRoleLabelName: string(utils.PVCRolePgData),
+ },
+ },
+ },
+ },
+ }
+
+ mockClient := fake.NewClientBuilder().
+ WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithLists(&snapshots).
+ Build()
+
+ newClusterReconciler := &ClusterReconciler{
+ Client: mockClient,
+ Scheme: scheme,
+ Recorder: record.NewFakeRecorder(120),
+ DiscoveryClient: discoveryClient,
+ }
+
+ res, err := newClusterReconciler.checkReadyForRecovery(ctx, nil, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(Or(BeNil(), Equal(reconcile.Result{})))
+ })
+
+ It("should requeue if bootstrapping from an invalid volume snapshot", func(ctx SpecContext) {
+ snapshots := volumesnapshot.VolumeSnapshotList{
+ Items: []volumesnapshot.VolumeSnapshot{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pgdata",
+ Namespace: namespace,
+ Labels: map[string]string{
+ utils.BackupNameLabelName: "backup-one",
+ },
+ Annotations: map[string]string{
+ utils.PvcRoleLabelName: string(utils.PVCRolePgTablespace),
+ },
+ },
+ },
+ },
+ }
+
+ mockClient := fake.NewClientBuilder().
+ WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithLists(&snapshots).
+ Build()
+
+ newClusterReconciler := &ClusterReconciler{
+ Client: mockClient,
+ Scheme: scheme,
+ Recorder: record.NewFakeRecorder(120),
+ DiscoveryClient: discoveryClient,
+ }
+
+ res, err := newClusterReconciler.checkReadyForRecovery(ctx, nil, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).ToNot(BeNil())
+ Expect(res).ToNot(Equal(reconcile.Result{}))
+ })
+
+ It("should requeue if bootstrapping from a snapshot that isn't there", func(ctx SpecContext) {
+ snapshots := volumesnapshot.VolumeSnapshotList{
+ Items: []volumesnapshot.VolumeSnapshot{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "foobar",
+ Namespace: namespace,
+ Labels: map[string]string{
+ utils.BackupNameLabelName: "backup-one",
+ },
+ Annotations: map[string]string{
+ utils.PvcRoleLabelName: string(utils.PVCRolePgData),
+ },
+ },
+ },
+ },
+ }
+
+ mockClient := fake.NewClientBuilder().
+ WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithLists(&snapshots).
+ Build()
+
+ newClusterReconciler := &ClusterReconciler{
+ Client: mockClient,
+ Scheme: scheme,
+ Recorder: record.NewFakeRecorder(120),
+ DiscoveryClient: discoveryClient,
+ }
+
+ res, err := newClusterReconciler.checkReadyForRecovery(ctx, nil, cluster)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).ToNot(BeNil())
+ Expect(res).ToNot(Equal(reconcile.Result{}))
+ })
+})
+
var _ = Describe("Set cluster metadata of service account", func() {
It("must be idempotent, if metadata are not defined", func() {
sa := &corev1.ServiceAccount{}
@@ -790,3 +990,109 @@ var _ = Describe("createOrPatchOwnedPodDisruptionBudget", func() {
})
})
})
+
+var _ = Describe("deletePodDisruptionBudgetIfExists", func() {
+ const namespace = "default"
+
+ var (
+ fakeClient k8client.Client
+ reconciler *ClusterReconciler
+ cluster *apiv1.Cluster
+ pdbPrimary *policyv1.PodDisruptionBudget
+ pdb *policyv1.PodDisruptionBudget
+ )
+
+ BeforeEach(func() {
+ cluster = &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-cluster",
+ Namespace: namespace,
+ },
+ }
+ pdbPrimary = &policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cluster.Name + apiv1.PrimaryPodDisruptionBudgetSuffix,
+ Namespace: namespace,
+ Labels: map[string]string{
+ "test": "value",
+ },
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "example"},
+ },
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ }
+ pdb = &policyv1.PodDisruptionBudget{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: cluster.Name,
+ Namespace: namespace,
+ Labels: map[string]string{
+ "test": "value",
+ },
+ },
+ Spec: policyv1.PodDisruptionBudgetSpec{
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "example"},
+ },
+ MinAvailable: &intstr.IntOrString{
+ Type: intstr.Int,
+ IntVal: 1,
+ },
+ },
+ }
+
+ fakeClient = fake.NewClientBuilder().
+ WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithObjects(cluster, pdbPrimary, pdb).
+ Build()
+
+ reconciler = &ClusterReconciler{
+ Client: fakeClient,
+ Recorder: record.NewFakeRecorder(10000),
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ }
+ })
+
+ It("should delete the existing PDBs", func(ctx SpecContext) {
+ err := fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdbPrimary), &policyv1.PodDisruptionBudget{})
+ Expect(err).ToNot(HaveOccurred())
+
+ err = fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdbPrimary), &policyv1.PodDisruptionBudget{})
+ Expect(err).ToNot(HaveOccurred())
+
+ err = reconciler.deletePodDisruptionBudgetIfExists(ctx, cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdbPrimary), &policyv1.PodDisruptionBudget{})
+ Expect(apierrs.IsNotFound(err)).To(BeTrue())
+
+ err = fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdb), &policyv1.PodDisruptionBudget{})
+ Expect(apierrs.IsNotFound(err)).To(BeTrue())
+ })
+
+ It("should be able to delete the PDB when the primary PDB is missing", func(ctx SpecContext) {
+ fakeClient = fake.NewClientBuilder().
+ WithScheme(schemeBuilder.BuildWithAllKnownScheme()).
+ WithObjects(cluster, pdb).
+ Build()
+ reconciler = &ClusterReconciler{
+ Client: fakeClient,
+ Recorder: record.NewFakeRecorder(10000),
+ Scheme: schemeBuilder.BuildWithAllKnownScheme(),
+ }
+
+ err := fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdbPrimary), &policyv1.PodDisruptionBudget{})
+ Expect(apierrs.IsNotFound(err)).To(BeTrue())
+
+ err = reconciler.deletePodDisruptionBudgetIfExists(ctx, cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = fakeClient.Get(ctx, k8client.ObjectKeyFromObject(pdb), &policyv1.PodDisruptionBudget{})
+ Expect(apierrs.IsNotFound(err)).To(BeTrue())
+ })
+})
diff --git a/controllers/cluster_plugins.go b/controllers/cluster_plugins.go
new file mode 100644
index 0000000000..26ae303d0c
--- /dev/null
+++ b/controllers/cluster_plugins.go
@@ -0,0 +1,58 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package controllers contains the controller of the CRD
+package controllers
+
+import (
+ "context"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+// updatePluginsStatus ensures that we load the plugins that are required to reconcile
+// this cluster
+func (r *ClusterReconciler) updatePluginsStatus(ctx context.Context, cluster *apiv1.Cluster) error {
+ contextLogger := log.FromContext(ctx)
+
+ // Load the plugins
+ pluginClient, err := cluster.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Error(err, "Error loading plugins, retrying")
+ return err
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ // Get the status of the plugins and store it inside the status section
+ oldCluster := cluster.DeepCopy()
+ metadataList := pluginClient.MetadataList()
+ cluster.Status.PluginStatus = make([]apiv1.PluginStatus, len(metadataList))
+ for i, entry := range metadataList {
+ cluster.Status.PluginStatus[i].Name = entry.Name
+ cluster.Status.PluginStatus[i].Version = entry.Version
+ cluster.Status.PluginStatus[i].Capabilities = entry.Capabilities
+ cluster.Status.PluginStatus[i].OperatorCapabilities = entry.OperatorCapabilities
+ cluster.Status.PluginStatus[i].WALCapabilities = entry.WALCapabilities
+ cluster.Status.PluginStatus[i].BackupCapabilities = entry.BackupCapabilities
+ }
+
+ return r.Client.Status().Patch(ctx, cluster, client.MergeFrom(oldCluster))
+}
diff --git a/controllers/cluster_restore.go b/controllers/cluster_restore.go
index 0694cfafa0..2a77dcffed 100644
--- a/controllers/cluster_restore.go
+++ b/controllers/cluster_restore.go
@@ -18,8 +18,11 @@ package controllers
import (
"context"
+ "fmt"
corev1 "k8s.io/api/core/v1"
+ apierrs "k8s.io/apimachinery/pkg/api/errors"
+ ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -31,29 +34,39 @@ import (
// reconcileRestoredCluster ensures that we own again any orphan resources when cluster gets reconciled for
// the first time
-func (r *ClusterReconciler) reconcileRestoredCluster(ctx context.Context, cluster *apiv1.Cluster) error {
+func (r *ClusterReconciler) reconcileRestoredCluster(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+) (*ctrl.Result, error) {
contextLogger := log.FromContext(ctx)
// No need to check this on a cluster which has been already deployed
if cluster.Status.LatestGeneratedNode != 0 {
- return nil
+ return nil, nil
}
// Get the list of PVCs belonging to this cluster but not owned by it
pvcs, err := getOrphanPVCs(ctx, r.Client, cluster)
if err != nil {
- return err
+ return nil, err
}
if len(pvcs) == 0 {
contextLogger.Info("no orphan PVCs found, skipping the restored cluster reconciliation")
- return nil
+ return nil, nil
}
-
contextLogger.Info("found orphan pvcs, trying to restore the cluster", "pvcs", pvcs)
+ if res, err := ensureClusterRestoreCanStart(ctx, r.Client, cluster); res != nil || err != nil {
+ return res, err
+ }
+
+ if err := ensureOrphanPodsAreDeleted(ctx, r.Client, cluster); err != nil {
+ return nil, fmt.Errorf("encountered an error while deleting an orphan pod: %w", err)
+ }
+
highestSerial, primarySerial, err := getNodeSerialsFromPVCs(pvcs)
if err != nil {
- return err
+ return nil, err
}
if primarySerial == 0 {
@@ -63,16 +76,27 @@ func (r *ClusterReconciler) reconcileRestoredCluster(ctx context.Context, cluste
contextLogger.Debug("proceeding to remove the fencing annotation if present")
if err := ensureClusterIsNotFenced(ctx, r.Client, cluster); err != nil {
- return err
+ return nil, err
}
contextLogger.Debug("proceeding to restore the cluster status")
if err := restoreClusterStatus(ctx, r.Client, cluster, highestSerial, primarySerial); err != nil {
- return err
+ return nil, err
}
contextLogger.Debug("restored the cluster status, proceeding to restore the orphan PVCS")
- return restoreOrphanPVCs(ctx, r.Client, cluster, pvcs)
+ return nil, restoreOrphanPVCs(ctx, r.Client, cluster, pvcs)
+}
+
+// ensureClusterRestoreCanStart is a function where the plugins can inject their custom logic to tell the
+// restore process to wait before starting the process
+// nolint: revive
+func ensureClusterRestoreCanStart(
+ ctx context.Context,
+ c client.Client,
+ cluster *apiv1.Cluster,
+) (*ctrl.Result, error) {
+ return nil, nil
}
func ensureClusterIsNotFenced(
@@ -148,6 +172,49 @@ func getOrphanPVCs(
return orphanPVCs, nil
}
+func ensureOrphanPodsAreDeleted(ctx context.Context, c client.Client, cluster *apiv1.Cluster) error {
+ contextLogger := log.FromContext(ctx).WithValues("orphan_pod_cleaner")
+
+ var podList corev1.PodList
+ if err := c.List(
+ ctx,
+ &podList,
+ client.InNamespace(cluster.Namespace),
+ client.MatchingLabels{utils.ClusterLabelName: cluster.Name},
+ ); err != nil {
+ return err
+ }
+
+ orphanPodList := make([]corev1.Pod, 0, podList.Size())
+ orphanPodNames := make([]string, 0, podList.Size())
+ for idx := range podList.Items {
+ pod := podList.Items[idx]
+ if len(pod.OwnerReferences) == 0 {
+ orphanPodList = append(orphanPodList, pod)
+ orphanPodNames = append(orphanPodNames, pod.Name)
+ }
+ }
+
+ if len(orphanPodList) == 0 {
+ return nil
+ }
+
+ contextLogger.Info(
+ "Found one or more orphan pods, deleting them",
+ "orphanPodNames", orphanPodNames,
+ )
+
+ for idx := range orphanPodList {
+ pod := orphanPodList[idx]
+ contextLogger.Debug("Deleting orphan pod", "podName", pod.Name)
+ if err := c.Delete(ctx, &pod); err != nil && !apierrs.IsNotFound(err) {
+ return err
+ }
+ }
+
+ return nil
+}
+
// getNodeSerialsFromPVCs tries to obtain the highestSerial and the primary serial from a group of PVCs
func getNodeSerialsFromPVCs(
pvcs []corev1.PersistentVolumeClaim,
diff --git a/controllers/plugins.go b/controllers/plugins.go
new file mode 100644
index 0000000000..6b02e991df
--- /dev/null
+++ b/controllers/plugins.go
@@ -0,0 +1,75 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+ "context"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ cnpiClient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+// preReconcilePluginHooks ensures we call the pre-reconcile plugin hooks
+func preReconcilePluginHooks(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ object client.Object,
+) cnpiClient.ReconcilerHookResult {
+ contextLogger := log.FromContext(ctx)
+
+ // Load the plugins
+ pluginClient, err := cluster.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Error(err, "Error loading plugins, retrying")
+ return cnpiClient.ReconcilerHookResult{
+ Err: err,
+ StopReconciliation: true,
+ }
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ return pluginClient.PreReconcile(ctx, cluster, object)
+}
+
+// postReconcilePluginHooks ensures we call the post-reconcile plugin hooks
+func postReconcilePluginHooks(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ object client.Object,
+) cnpiClient.ReconcilerHookResult {
+ contextLogger := log.FromContext(ctx)
+
+ // Load the plugins
+ pluginClient, err := cluster.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Error(err, "Error loading plugins, retrying")
+ return cnpiClient.ReconcilerHookResult{
+ Err: err,
+ StopReconciliation: true,
+ }
+ }
+ defer func() {
+ pluginClient.Close(ctx)
+ }()
+
+ return pluginClient.PostReconcile(ctx, cluster, object)
+}
diff --git a/controllers/pooler_controller_test.go b/controllers/pooler_controller_test.go
index 2687d0b6f9..d58738e12a 100644
--- a/controllers/pooler_controller_test.go
+++ b/controllers/pooler_controller_test.go
@@ -137,7 +137,7 @@ var _ = Describe("pooler_controller unit tests", func() {
pooler3.Spec.PgBouncer.AuthQuerySecret = &v1.LocalObjectReference{
Name: "test-one",
}
- pooler3.Spec.PgBouncer.AuthQuery = "SELECT usename, passwd FROM pg_shadow WHERE usename=$1"
+ pooler3.Spec.PgBouncer.AuthQuery = "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1"
err := k8sClient.Update(ctx, &pooler3)
Expect(err).ToNot(HaveOccurred())
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index b5478083e5..5965dd21f1 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -497,6 +497,41 @@ the selected PostgreSQL instance
+## BackupPluginConfiguration {#postgresql-cnpg-io-v1-BackupPluginConfiguration}
+
+
+**Appears in:**
+
+- [BackupSpec](#postgresql-cnpg-io-v1-BackupSpec)
+
+- [ScheduledBackupSpec](#postgresql-cnpg-io-v1-ScheduledBackupSpec)
+
+
+BackupPluginConfiguration contains the backup configuration used by
+the backup plugin
+
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the name of the plugin managing this backup
+ |
+
+parameters
+map[string]string
+ |
+
+ Parameters are the configuration parameters passed to the backup
+plugin for this backup
+ |
+
+
+
+
## BackupSnapshotElementStatus {#postgresql-cnpg-io-v1-BackupSnapshotElementStatus}
@@ -630,8 +665,15 @@ standby, if available.
BackupMethod
- The backup method to be used, possible options are barmanObjectStore
-and volumeSnapshot . Defaults to: barmanObjectStore .
+ The backup method to be used, possible options are barmanObjectStore ,
+volumeSnapshot or plugin . Defaults to: barmanObjectStore .
+ |
+
+pluginConfiguration
+BackupPluginConfiguration
+ |
+
+ Configuration parameters passed to the plugin managing this backup
|
online
@@ -1846,6 +1888,28 @@ Defaults to: RuntimeDefault
The tablespaces configuration
|
+enablePDB
+bool
+ |
+
+ Manage the PodDisruptionBudget resources within the cluster. When
+configured as true (default setting), the pod disruption budgets
+will safeguard the primary node from being terminated. Conversely,
+setting it to false will result in the absence of any
+PodDisruptionBudget resource, permitting the shutdown of all nodes
+hosting the PostgreSQL cluster. This latter configuration is
+advisable for any PostgreSQL cluster employed for
+development/staging purposes.
+ |
+
+plugins [Required]
+PluginConfigurationList
+ |
+
+ The plugins configuration, containing
+any plugin to be loaded with the corresponding configuration
+ |
+
@@ -2168,6 +2232,13 @@ This field is reported when .spec.failoverDelay
is populated or dur
Image contains the image name used by the pods
+pluginStatus [Required]
+[]PluginStatus
+ |
+
+ PluginStatus is the status of the loaded plugins
+ |
+
@@ -3363,7 +3434,7 @@ by pgbouncer
The credentials of the user that need to be used for the authentication
query. In case it is specified, also an AuthQuery
-(e.g. "SELECT usename, passwd FROM pg_shadow WHERE usename=$1")
+(e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
has to be specified and no automatic CNPG Cluster integration will be triggered.
|
@@ -3372,7 +3443,7 @@ has to be specified and no automatic CNPG Cluster integration will be triggered.
The query that will be used to download the hash of the password
-of a certain user. Default: "SELECT usename, passwd FROM user_search($1)".
+of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
In case it is specified, also an AuthQuerySecret has to be specified and
no automatic CNPG Cluster integration will be triggered.
|
@@ -3406,6 +3477,70 @@ the operator calls PgBouncer's PAUSE
and RESUME
comman
+## PluginStatus {#postgresql-cnpg-io-v1-PluginStatus}
+
+
+**Appears in:**
+
+- [ClusterStatus](#postgresql-cnpg-io-v1-ClusterStatus)
+
+
+PluginStatus is the status of a loaded plugin
+
+
+
+Field | Description |
+
+name [Required]
+string
+ |
+
+ Name is the name of the plugin
+ |
+
+version [Required]
+string
+ |
+
+ Version is the version of the plugin loaded by the
+latest reconciliation loop
+ |
+
+capabilities [Required]
+[]string
+ |
+
+ Capabilities are the list of capabilities of the
+plugin
+ |
+
+operatorCapabilities [Required]
+[]string
+ |
+
+ OperatorCapabilities are the list of capabilities of the
+plugin regarding the reconciler
+ |
+
+walCapabilities [Required]
+[]string
+ |
+
+ WALCapabilities are the list of capabilities of the
+plugin regarding the WAL management
+ |
+
+backupCapabilities [Required]
+[]string
+ |
+
+ BackupCapabilities are the list of capabilities of the
+plugin regarding the Backup management
+ |
+
+
+
+
## PodTemplateSpec {#postgresql-cnpg-io-v1-PodTemplateSpec}
@@ -4313,6 +4448,13 @@ standby, if available.
and volumeSnapshot
. Defaults to: barmanObjectStore
.
+pluginConfiguration
+BackupPluginConfiguration
+ |
+
+ Configuration parameters passed to the plugin managing this backup
+ |
+
online
bool
|
diff --git a/docs/src/connection_pooling.md b/docs/src/connection_pooling.md
index 0ebd8cf3d5..da8ea378e1 100644
--- a/docs/src/connection_pooling.md
+++ b/docs/src/connection_pooling.md
@@ -156,22 +156,27 @@ Then, for each application database, grant the permission for
GRANT CONNECT ON DATABASE { database name here } TO cnpg_pooler_pgbouncer;
```
-Finally, connect in each application database, and then create the authentication
-function inside each of the application databases:
+Finally, as a *superuser*, connect to each application database, and then create
+the authentication function inside each of the application databases:
```sql
-CREATE OR REPLACE FUNCTION user_search(uname TEXT)
+CREATE OR REPLACE FUNCTION public.user_search(uname TEXT)
RETURNS TABLE (usename name, passwd text)
LANGUAGE sql SECURITY DEFINER AS
- 'SELECT usename, passwd FROM pg_shadow WHERE usename=$1;';
+ 'SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1;';
-REVOKE ALL ON FUNCTION user_search(text)
+REVOKE ALL ON FUNCTION public.user_search(text)
FROM public;
-GRANT EXECUTE ON FUNCTION user_search(text)
+GRANT EXECUTE ON FUNCTION public.user_search(text)
TO cnpg_pooler_pgbouncer;
```
+!!! Important
+ Given that `user_search` is a `SECURITY DEFINER` function, you need to
+ create it through a role with `SUPERUSER` privileges, such as the `postgres`
+ user.
+
## Pod templates
You can take advantage of pod templates specification in the `template`
diff --git a/docs/src/e2e.md b/docs/src/e2e.md
index cc93ce8972..9a32f0ebe0 100644
--- a/docs/src/e2e.md
+++ b/docs/src/e2e.md
@@ -1,90 +1,129 @@
# End-to-End Tests
CloudNativePG is automatically tested after each
-commit via a suite of **End-to-end (E2E) tests**, which ensures that
-the operator correctly deploys and manages PostgreSQL clusters.
+commit via a suite of **End-to-end (E2E) tests** (or integration tests)
+which ensure that the operator correctly deploys and manages PostgreSQL
+clusters.
-Moreover, the following Kubernetes versions are tested for each commit,
-helping detect bugs at an early stage of the development process:
+Kubernetes versions 1.23 through 1.29, and PostgreSQL versions 12 through 16,
+are tested for each commit, helping detect bugs at an early stage of the
+development process.
-* 1.27
-* 1.26
-* 1.25
-* 1.24
-* 1.23
+For each tested version of Kubernetes and PostgreSQL, a Kubernetes
+cluster is created using [kind](https://kind.sigs.k8s.io/), run on the GitHub
+Actions platform,
+and the following suite of E2E tests are performed on that cluster:
-The following PostgreSQL versions are tested:
+* **Basic:**
+ * Installation of the operator
+ * Creation of a Cluster
+ * Usage of a persistent volume for data storage
-* PostgreSQL 15
-* PostgreSQL 14
-* PostgreSQL 13
-* PostgreSQL 12
-* PostgreSQL 11
+* **Service connectivity:**
+ * Connection via services, including read-only
+ * Connection via user-provided server and/or client certificates
+ * PgBouncer
-For each tested version of Kubernetes and PostgreSQL, a Kubernetes
-cluster is created using [kind](https://kind.sigs.k8s.io/),
-and the following suite of E2E tests are performed on that cluster:
+* **Self-healing:**
+ * Failover
+ * Switchover
+ * Primary endpoint switch in case of failover in less than 10 seconds
+ * Primary endpoint switch in case of switchover in less than 20 seconds
+ * Recover from a degraded state in less than 60 seconds
+ * PVC Deletion
+ * Corrupted PVC
+
+* **Backup and Restore:**
+ * Backup and restore from Volume Snapshots
+ * Backup and ScheduledBackups execution using Barman Cloud on S3
+ * Backup and ScheduledBackups execution using Barman Cloud on Azure
+ blob storage
+ * Restore from backup using Barman Cloud on S3
+ * Restore from backup using Barman Cloud on Azure blob storage
+ * Point-in-time recovery (PITR) on Azure, S3 storage
+ * Wal-Restore (sequential / parallel)
+
+* **Operator:**
+ * Operator Deployment
+ * Operator configuration via ConfigMap
+ * Operator pod deletion
+ * Operator pod eviction
+ * Operator upgrade
+ * Operator High Availability
+
+* **Observability:**
+ * Metrics collection
+ * PgBouncer Metrics
+ * JSON log format
+
+* **Replication:**
+ * Replication Slots
+ * Synchronous replication
+ * Scale-up and scale-down of a Cluster
+
+* **Replica clusters**
+ * Bootstrapping a replica cluster from backup
+ * Bootstrapping a replica cluster via streaming
+ * Bootstrapping via volume snapshots
+ * Detaching a replica cluster
+
+* **Plugin:**
+ * Cluster Hibernation using CNPG plugin
+ * Fencing
+ * Creation of a connection certificate
+
+* **Postgres Configuration:**
+ * Manage PostgreSQL configuration changes
+ * Rolling updates when changing PostgreSQL images
+ * Rolling updates when changing ImageCatalog/ClusterImageCatalog images
+ * Rolling updates on hot standby sensitive parameter changes
+ * Database initialization via InitDB
+
+* **Pod Scheduling:**
+ * Tolerations and taints
+ * Pod affinity using `NodeSelector`
+ * Rolling updates on PodSpec drift detection
+ * In-place upgrades
+ * Multi-Arch availability
+
+* **Cluster Metadata:**
+ * ConfigMap for Cluster Labels and Annotations
+ * Object metadata
+
+* **Recovery:**
+ * Data corruption
+ * pg_basebackup
+
+* **Importing Databases:**
+ * Microservice approach
+ * Monolith approach
+
+* **Storage:**
+ * Storage expansion
+ * Dedicated PG_WAL persistent volume
+
+* **Security:**
+ * AppArmor annotation propagation. Executed only on Azure environment
+
+* **Maintenance:**
+ * Node Drain with maintenance window
+ * Node Drain with single-instance cluster with/without Pod Disruption Budgets
+
+* **Hibernation**
+ * Declarative hibernation / rehydration
+
+* **Volume snapshots**
+ * Backup/restore for cold and online snapshots
+ * Point-in-time recovery (PITR) for cold and online snapshots
+ * Backups via plugin for cold and online snapshots
+ * Declarative backups for cold and online snapshots
+
+* **Managed Roles**
+ * Creation and update of managed roles
+ * Password maintenance using Kubernetes secrets
-- **Basic:**
- * Installation of the operator;
- * Creation of a Cluster;
- * Usage of a persistent volume for data storage;
-- **Service connectivity:**
- * Connection via services, including read-only;
- * Connection via user-provided server and/or client certificates;
- * PgBouncer;
-- **Self-healing:**
- * Failover;
- * Switchover;
- * Primary endpoint switch in case of failover in less than 10 seconds;
- * Primary endpoint switch in case of switchover in less than 20 seconds;
- * Recover from a degraded state in less than 60 seconds;
- * PVC Deletion;
-- **Backup and Restore:**
- * Backup and ScheduledBackups execution using Barman Cloud on S3;
- * Backup and ScheduledBackups execution using Barman Cloud on Azure
- blob storage;
- * Restore from backup using Barman Cloud on S3;
- * Restore from backup using Barman Cloud on Azure blob storage;
- * Wal-Restore;
-- **Operator:**
- * Operator Deployment;
- * Operator configuration via ConfigMap;
- * Operator pod deletion;
- * Operator pod eviction;
- * Operator upgrade;
- * Operator High Availability;
-- **Observability:**
- * Metrics collection;
- * PgBouncer Metrics;
- * JSON log format;
-- **Replication:**
- * Physical replica clusters;
- * Replication Slots;
- * Synchronous replication;
- * Scale-up and scale-down of a Cluster;
-- **Plugin:**
- * Cluster Hibernation using CNPG plugin;
- * Fencing;
- * Creation of a connection certificate;
-- **Postgres Configuration:**
- * Manage PostgreSQL configuration changes;
- * Rolling updates when changing PostgreSQL images;
-- **Pod Scheduling:**
- * Tolerations and taints;
- * Pod affinity using `NodeSelector`;
-- **Cluster Metadata:**
- * ConfigMap for Cluster Labels and Annotations;
- * Object metadata;
-- **Recovery:**
- * Data corruption;
- * pg_basebackup;
-- **Importing Databases:**
- * Microservice approach;
- * Monolith approach;
-- **Storage:**
- * Storage expansion;
-- **Security:**
- * AppArmor annotation propagation. Executed only on Azure environment;
-- **Maintenance:**
- * Node Drain;
+* **Tablespaces**
+ * Declarative creation of tablespaces
+ * Declarative creation of temporary tablespaces
+ * Backup / recovery from object storage
+ * Backup / recovery from volume snapshots
diff --git a/docs/src/images/grafana-local.png b/docs/src/images/grafana-local.png
index b82ef58f3d..d938bc57b1 100644
Binary files a/docs/src/images/grafana-local.png and b/docs/src/images/grafana-local.png differ
diff --git a/docs/src/index.md b/docs/src/index.md
index c42d442471..6855bd41d1 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -81,6 +81,7 @@ Additionally, the Community provides images for the [PostGIS extension](postgis.
* Backups on object stores (AWS S3 and S3-compatible, Azure Blob Storage, and Google Cloud Storage)
* Full recovery and Point-In-Time recovery from an existing backup on volume snapshots or object stores
* Offline import of existing PostgreSQL databases, including major upgrades of PostgreSQL
+* Online import of existing PostgreSQL databases, including major upgrades of PostgreSQL, through PostgreSQL native logical replication (imperative, via the `cnpg` plugin)
* Fencing of an entire PostgreSQL cluster, or a subset of the instances in a declarative way
* Hibernation of a PostgreSQL cluster in a declarative way
* Support for Synchronous Replicas
diff --git a/docs/src/installation_upgrade.md b/docs/src/installation_upgrade.md
index 87fb3a20d6..80f958bd5b 100644
--- a/docs/src/installation_upgrade.md
+++ b/docs/src/installation_upgrade.md
@@ -7,12 +7,12 @@
The operator can be installed like any other resource in Kubernetes,
through a YAML manifest applied via `kubectl`.
-You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.22/releases/cnpg-1.22.1.yaml)
+You can install the [latest operator manifest](https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.22/releases/cnpg-1.22.2.yaml)
for this minor release as follows:
```sh
kubectl apply --server-side -f \
- https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.22/releases/cnpg-1.22.1.yaml
+ https://raw.githubusercontent.com/cloudnative-pg/cloudnative-pg/release-1.22/releases/cnpg-1.22.2.yaml
```
You can verify that with:
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
old mode 100644
new mode 100755
index 30d0f11a1f..b8d60cc21f
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -34,7 +34,7 @@ For example, let's install the 1.18.1 release of the plugin, for an Intel based
64 bit server. First, we download the right `.deb` file.
``` sh
-$ wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.18.1/kubectl-cnpg_1.18.1_linux_x86_64.deb
+wget https://github.com/cloudnative-pg/cloudnative-pg/releases/download/v1.18.1/kubectl-cnpg_1.18.1_linux_x86_64.deb
```
Then, install from the local file using `dpkg`:
@@ -116,6 +116,29 @@ operating system and architectures:
* arm 5/6/7
* arm64
+### Configuring auto-completion
+
+To configure auto-completion for the plugin, a helper shell script needs to be
+installed into your current PATH. Assuming the latter contains `/usr/local/bin`,
+this can be done with the following commands:
+
+```shell
+cat > kubectl_complete-cnpg <<EOF
+#!/usr/bin/env sh
+kubectl cnpg __complete "\$@"
+EOF
+chmod +x kubectl_complete-cnpg
+sudo mv kubectl_complete-cnpg /usr/local/bin
+```
+
+### Logical Replication Publications
+
+The `cnpg publication` command group is designed to streamline the creation
+and removal of [PostgreSQL logical replication publications](https://www.postgresql.org/docs/current/logical-replication-publication.html).
+
+#### Creating a new publication
+
+To create a logical replication publication, use the `cnpg publication create`
+command:
+
+```sh
+kubectl cnpg publication create <cluster-name> \
+  --publication <publication-name> \
+  [--external-cluster <external-cluster-name>]
+  [options]
+```
+
+There are two primary use cases:
+
+- With `--external-cluster`: Use this option to create a publication on an
+ external cluster (i.e. defined in the `externalClusters` stanza). The commands
+  will be issued from the `<cluster-name>`, but the publication will be for the
+  data in `<external-cluster-name>`.
+
+- Without `--external-cluster`: Use this option to create a publication in the
+  `<cluster-name>` PostgreSQL `Cluster` (by default, the `app` database).
+
+!!! Warning
+ When connecting to an external cluster, ensure that the specified user has
+ sufficient permissions to execute the `CREATE PUBLICATION` command.
+
+You have several options, similar to the [`CREATE PUBLICATION`](https://www.postgresql.org/docs/current/sql-createpublication.html)
+command, to define the group of tables to replicate. Notable options include:
+
+- If you specify the `--all-tables` option, you create a publication `FOR ALL TABLES`.
+- Alternatively, you can specify multiple occurrences of:
+ - `--table`: Add a specific table (with an expression) to the publication.
+ - `--schema`: Include all tables in the specified database schema (available
+ from PostgreSQL 15).
+
+The `--dry-run` option enables you to preview the SQL commands that the plugin
+will execute.
+
+For additional information and detailed instructions, type the following
+command:
+
+```sh
+kubectl cnpg publication create --help
+```
+
+##### Example
+
+Given a `source-cluster` and a `destination-cluster`, we would like to create a
+publication for the data on `source-cluster`.
+The `destination-cluster` has an entry in the `externalClusters` stanza pointing
+to `source-cluster`.
+
+We can run:
+
+``` sh
+kubectl cnpg publication create destination-cluster \
+ --external-cluster=source-cluster --all-tables
+```
+
+which will create a publication for all tables on `source-cluster`, running
+the SQL commands on the `destination-cluster`.
+
+Or instead, we can run:
+
+``` sh
+kubectl cnpg publication create source-cluster \
+ --publication=app --all-tables
+```
+
+which will create a publication named `app` for all the tables in the
+`source-cluster`, running the SQL commands on the source cluster.
+
+!!! Info
+ There are two sample files that have been provided for illustration and inspiration:
+ [logical-source](samples/cluster-example-logical-source.yaml) and
+ [logical-destination](samples/cluster-example-logical-destination.yaml).
+
+#### Dropping a publication
+
+The `cnpg publication drop` command seamlessly complements the `create` command
+by offering similar key options, including the publication name, cluster name,
+and an optional external cluster. You can drop a `PUBLICATION` with the
+following command structure:
+
+```sh
+kubectl cnpg publication drop <cluster-name> \
+  --publication <publication-name> \
+  [--external-cluster <external-cluster-name>]
+ [options]
+```
+
+To access further details and precise instructions, use the following command:
+
+```sh
+kubectl cnpg publication drop --help
+```
+
+### Logical Replication Subscriptions
+
+The `cnpg subscription` command group is a dedicated set of commands designed
+to simplify the creation and removal of
+[PostgreSQL logical replication subscriptions](https://www.postgresql.org/docs/current/logical-replication-subscription.html).
+These commands are specifically crafted to aid in the establishment of logical
+replication subscriptions, especially when dealing with remote PostgreSQL
+databases.
+
+!!! Warning
+ Before using these commands, it is essential to have a comprehensive
+ understanding of both the capabilities and limitations of PostgreSQL's
+ native logical replication system.
+ In particular, be mindful of the [logical replication restrictions](https://www.postgresql.org/docs/current/logical-replication-restrictions.html).
+
+In addition to subscription management, we provide a helpful command for
+synchronizing all sequences from the source cluster. While its applicability
+may vary, this command can be particularly useful in scenarios involving major
+upgrades or data import from remote servers.
+
+#### Creating a new subscription
+
+To create a logical replication subscription, use the `cnpg subscription create`
+command. The basic structure of this command is as follows:
+
+```sh
+kubectl cnpg subscription create <cluster-name> \
+  --subscription <subscription-name> \
+  --publication <publication-name> \
+  --external-cluster <external-cluster-name> \
+ [options]
+```
+
+This command configures a subscription directed towards the specified
+publication in the designated external cluster, as defined in the
+`externalClusters` stanza of the `<cluster-name>`.
+
+For additional information and detailed instructions, type the following
+command:
+
+```sh
+kubectl cnpg subscription create --help
+```
+
+##### Example
+
+As in the section on publications, we have a `source-cluster` and a
+`destination-cluster`, and we have already created a publication called
+`app`.
+
+The following command:
+
+``` sh
+kubectl cnpg subscription create destination-cluster \
+ --external-cluster=source-cluster \
+ --publication=app --subscription=app
+```
+
+will create a subscription for `app` on the destination cluster.
+
+!!! Warning
+ Prioritize testing subscriptions in a non-production environment to ensure
+ their effectiveness and identify any potential issues before implementing them
+ in a production setting.
+
+!!! Info
+ There are two sample files that have been provided for illustration and inspiration:
+ [logical-source](samples/cluster-example-logical-source.yaml) and
+ [logical-destination](samples/cluster-example-logical-destination.yaml).
+
+#### Dropping a subscription
+
+The `cnpg subscription drop` command seamlessly complements the `create` command.
+You can drop a `SUBSCRIPTION` with the following command structure:
+
+```sh
+kubectl cnpg subscription drop <cluster-name> \
+  --subscription <subscription-name> \
+  [options]
+```
+
+To access further details and precise instructions, use the following command:
+
+```sh
+kubectl cnpg subscription drop --help
+```
+
+#### Synchronizing sequences
+
+One notable constraint of PostgreSQL logical replication, implemented through
+publications and subscriptions, is the lack of sequence synchronization. This
+becomes particularly relevant when utilizing logical replication for live
+database migration, especially to a higher version of PostgreSQL. A crucial
+step in this process involves updating sequences before transitioning
+applications to the new database (*cutover*).
+
+To address this limitation, the `cnpg subscription sync-sequences` command
+offers a solution. This command establishes a connection with the source
+database, retrieves all relevant sequences, and subsequently updates local
+sequences with matching identities (based on database schema and sequence
+name).
+
+You can use the command as shown below:
+
+```sh
+kubectl cnpg subscription sync-sequences <cluster-name> \
+  --subscription <subscription-name>
+
+```
+
+For comprehensive details and specific instructions, utilize the following
+command:
+
+```sh
+kubectl cnpg subscription sync-sequences --help
+```
+
+##### Example
+
+As in the previous sections for publication and subscription, we have
+a `source-cluster` and a `destination-cluster`. The publication and the
+subscription, both called `app`, are already present.
+
+The following command will synchronize the sequences involved in the
+`app` subscription, from the source cluster into the destination cluster.
+
+``` sh
+kubectl cnpg subscription sync-sequences destination-cluster \
+ --subscription=app
+```
+
+!!! Warning
+ Prioritize testing subscriptions in a non-production environment to
+ guarantee their effectiveness and detect any potential issues before deploying
+ them in a production setting.
+
## Integration with K9s
The `cnpg` plugin can be easily integrated in [K9s](https://k9scli.io/), a
popular terminal-based UI to interact with Kubernetes clusters.
See [`k9s/plugins.yml`](samples/k9s/plugins.yml) for details.
-
diff --git a/docs/src/kubernetes_upgrade.md b/docs/src/kubernetes_upgrade.md
index a4dd7b8d92..c3c142b6a2 100644
--- a/docs/src/kubernetes_upgrade.md
+++ b/docs/src/kubernetes_upgrade.md
@@ -1,54 +1,109 @@
-# Kubernetes Upgrade
+# Kubernetes Upgrade and Maintenance
+
+Maintaining an up-to-date Kubernetes cluster is crucial for ensuring optimal
+performance and security, particularly for self-managed clusters, especially
+those running on bare metal infrastructure. Regular updates help address
+technical debt and mitigate business risks, despite the controlled downtimes
+associated with temporarily removing a node from the cluster for maintenance
+purposes. For further insights on embracing risk in operations, refer to the
+["Embracing Risk"](https://landing.google.com/sre/sre-book/chapters/embracing-risk/)
+chapter from the Site Reliability Engineering book.
-Kubernetes clusters must be kept updated. This becomes even more
-important if you are self-managing your Kubernetes clusters, especially
-on **bare metal**.
+## Importance of Regular Updates
-Planning and executing regular updates is a way for your organization
-to clean up the technical debt and reduce the business risks, despite
-the introduction in your Kubernetes infrastructure of controlled
-downtimes that temporarily take out a node from the cluster for
-maintenance reasons (recommended reading:
-["Embracing Risk"](https://landing.google.com/sre/sre-book/chapters/embracing-risk/)
-from the Site Reliability Engineering book).
+Updating Kubernetes involves planning and executing maintenance tasks, such as
+applying security updates to underlying Linux servers, replacing malfunctioning
+hardware components, or upgrading the cluster to the latest Kubernetes version.
+These activities are essential for maintaining a robust and secure
+infrastructure.
-For example, you might need to apply security updates on the Linux
-servers where Kubernetes is installed, or to replace a malfunctioning
-hardware component such as RAM, CPU, or RAID controller, or even upgrade
-the cluster to the latest version of Kubernetes.
+## Maintenance Operations in a Cluster
-Usually, maintenance operations in a cluster are performed one node
-at a time by:
+Typically, maintenance operations are carried out on one node at a time, following a [structured process](https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade/):
-1. evicting the workloads from the node to be updated (`drain`)
-2. performing the actual operation (for example, system update)
-3. re-joining the node to the cluster (`uncordon`)
+1. eviction of workloads (`drain`): workloads are gracefully moved away from
+ the node to be updated, ensuring a smooth transition.
+2. performing the operation: the actual maintenance operation, such as a
+ system update or hardware replacement, is executed.
+3. rejoining the node to the cluster (`uncordon`): the updated node is
+ reintegrated into the cluster, ready to resume its responsibilities.
-The above process requires workloads to be either stopped for the
-entire duration of the upgrade or migrated to another node.
+This process requires either stopping workloads for the entire upgrade duration
+or migrating them to other nodes in the cluster.
-While the latest case is the expected one in terms of service
-reliability and self-healing capabilities of Kubernetes, there can
-be situations where it is advised to operate with a temporarily
-degraded cluster and wait for the upgraded node to be up again.
+## Temporary PostgreSQL Cluster Degradation
-In particular, if your PostgreSQL cluster relies on **node-local storage**
-\- that is *storage which is local to the Kubernetes worker node where
-the PostgreSQL database is running*.
-Node-local storage (or simply *local storage*) is used to enhance performance.
+While the standard approach ensures service reliability and leverages
+Kubernetes' self-healing capabilities, there are scenarios where operating with
+a temporarily degraded cluster may be acceptable. This is particularly relevant
+for PostgreSQL clusters relying on **node-local storage**, where the storage is
+local to the Kubernetes worker node running the PostgreSQL database. Node-local
+storage, or simply *local storage*, is employed to enhance performance.
!!! Note
- If your database files are on shared storage over the network,
- you may not need to define a maintenance window. If the volumes currently
- used by the pods can be reused by pods running on different nodes after
- the drain, the default self-healing behavior of the operator will work
- fine (you can then skip the rest of this section).
-
-When using local storage for PostgreSQL, you are advised to temporarily
-put the cluster in **maintenance mode** through the `nodeMaintenanceWindow`
-option to avoid standard self-healing procedures to kick in,
-while, for example, enlarging the partition on the physical node or
-updating the node itself.
+ If your database files reside on shared storage accessible over the
+ network, the default self-healing behavior of the operator can efficiently
+ handle scenarios where volumes are reused by pods on different nodes after a
+ drain operation. In such cases, you can skip the remaining sections of this
+ document.
+
+## Pod Disruption Budgets
+
+By default, CloudNativePG safeguards Postgres cluster operations. If a node is
+to be drained and contains a cluster's primary instance, a switchover happens
+ahead of the drain. Once the instance in the node is downgraded to replica, the
+draining can resume.
+For single-instance clusters, a switchover is not possible, so CloudNativePG
+will prevent draining the node where the instance is housed.
+Additionally, in multi-instance clusters, CloudNativePG guarantees that only
+one replica at a time is gracefully shut down during a drain operation.
+
+Each PostgreSQL `Cluster` is equipped with two associated `PodDisruptionBudget`
+resources - you can easily confirm it with the `kubectl get pdb` command.
+
+Our recommendation is to leave pod disruption budgets enabled for every
+production Postgres cluster. This can be effortlessly managed by toggling the
+`.spec.enablePDB` option, as detailed in the
+[API reference](cloudnative-pg.v1.md#postgresql-cnpg-io-v1-ClusterSpec).
+
+## PostgreSQL Clusters used for Development or Testing
+
+For PostgreSQL clusters used for development purposes, often consisting of
+a single instance, it is essential to disable pod disruption budgets. Failure
+to do so will prevent the node hosting that cluster from being drained.
+
+The following example illustrates how to disable pod disruption budgets for a
+1-instance development cluster:
+
+```yaml
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: dev
+spec:
+ instances: 1
+ enablePDB: false
+
+ storage:
+ size: 1Gi
+```
+
+This configuration ensures smoother maintenance procedures without restrictions
+on draining the node during development activities.
+
+## Node Maintenance Window
+
+!!! Important
+ While CloudNativePG will continue supporting the node maintenance window,
+ it is currently recommended to transition to direct control of pod disruption
+ budgets, as explained in the previous section. This section is retained
+ mainly for backward compatibility.
+
+Prior to release 1.23, CloudNativePG had just one declarative mechanism to manage
+Kubernetes upgrades when dealing with local storage: you had to temporarily put
+the cluster in **maintenance mode** through the `nodeMaintenanceWindow` option
+to avoid standard self-healing procedures to kick in, while, for example,
+enlarging the partition on the physical node or updating the node itself.
!!! Warning
Limit the duration of the maintenance window to the shortest
@@ -87,7 +142,13 @@ reusePVC disabled: see section below.
Don't be afraid: it refers to another volume internally used
by the operator - not the PostgreSQL data directory.
-## Single instance clusters with `reusePVC` set to `false`
+!!! Important
+ `PodDisruptionBudget` management can be disabled by setting the
+ `.spec.enablePDB` field to `false`. In that case, the operator won't
+ create `PodDisruptionBudgets` and will delete them if they were
+ previously created.
+
+### Single instance clusters with `reusePVC` set to `false`
!!! Important
We recommend to always create clusters with more
diff --git a/docs/src/labels_annotations.md b/docs/src/labels_annotations.md
index e4039dd357..223ceb8264 100644
--- a/docs/src/labels_annotations.md
+++ b/docs/src/labels_annotations.md
@@ -167,19 +167,24 @@ These predefined annotations are managed by CloudNativePG.
`cnpg.io/reloadedAt`
: Contains the latest cluster `reload` time. `reload` is triggered by the user through a plugin.
-`kubectl.kubernetes.io/restartedAt`
-: When available, the time of last requested restart of a Postgres cluster.
-
`cnpg.io/skipEmptyWalArchiveCheck`
: When set to `true` on a `Cluster` resource, the operator disables the check
that ensures that the WAL archive is empty before writing data. Use at your own
risk.
+`cnpg.io/skipWalArchiving`
+: When set to `enabled` on a `Cluster` resource, the operator disables WAL archiving.
+ This will set `archive_mode` to `off` and require a restart of all PostgreSQL
+ instances. Use at your own risk.
+
`cnpg.io/snapshotStartTime`
-: The time a snapshot started.
+: The time a snapshot started.
`cnpg.io/snapshotEndTime`
-: The time a snapshot was marked as ready to use.
+: The time a snapshot was marked as ready to use.
+
+`kubectl.kubernetes.io/restartedAt`
+: When available, the time of last requested restart of a Postgres cluster.
## Prerequisites
diff --git a/docs/src/monitoring.md b/docs/src/monitoring.md
index 9e9a22ba67..ea4c30e0ec 100644
--- a/docs/src/monitoring.md
+++ b/docs/src/monitoring.md
@@ -749,8 +749,7 @@ section for context:
In addition, we provide the "raw" sources for the Prometheus alert rules in the
`alerts.yaml` file.
-The [Grafana dashboard](https://github.com/cloudnative-pg/charts/blob/main/charts/cloudnative-pg/monitoring/grafana-dashboard.json)
-is now part of the official Helm Chart project.
+The [Grafana dashboard](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json) has a dedicated repository now.
Note that, for the configuration of `kube-prometheus-stack`, other fields and
settings are available over what we provide in `kube-stack-config.yaml`.
diff --git a/docs/src/operator_conf.md b/docs/src/operator_conf.md
index b832b18076..81459a12f2 100644
--- a/docs/src/operator_conf.md
+++ b/docs/src/operator_conf.md
@@ -42,6 +42,8 @@ Name | Description
`ENABLE_INSTANCE_MANAGER_INPLACE_UPDATES` | when set to `true`, enables in-place updates of the instance manager after an update of the operator, avoiding rolling updates of the cluster (default `false`)
`MONITORING_QUERIES_CONFIGMAP` | The name of a ConfigMap in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters
`MONITORING_QUERIES_SECRET` | The name of a Secret in the operator's namespace with a set of default queries (to be specified under the key `queries`) to be applied to all created Clusters
+`CERTIFICATE_DURATION` | Determines the lifetime of the generated certificates in days. Default is 90.
+`EXPIRING_CHECK_THRESHOLD` | Determines the threshold, in days, for identifying a certificate as expiring. Default is 7.
`CREATE_ANY_SERVICE` | when set to `true`, will create `-any` service for the cluster. Default is `false`
Values in `INHERITED_ANNOTATIONS` and `INHERITED_LABELS` support path-like wildcards. For example, the value `example.com/*` will match
diff --git a/docs/src/quickstart.md b/docs/src/quickstart.md
index a220fc373a..d1fee6ce7b 100644
--- a/docs/src/quickstart.md
+++ b/docs/src/quickstart.md
@@ -303,8 +303,8 @@ And access Grafana locally at [`http://localhost:3000/`](http://localhost:3000/)
providing the credentials `admin` as username, `prom-operator` as password (defined in `kube-stack-config.yaml`).
CloudNativePG provides a default dashboard for Grafana as part of the official
-[Helm chart](https://github.com/cloudnative-pg/charts). You can download the
-[grafana-dashboard.json](https://github.com/cloudnative-pg/charts/blob/main/charts/cloudnative-pg/monitoring/grafana-dashboard.json)
+[Helm chart](https://github.com/cloudnative-pg/charts). You can also download the
+[grafana-dashboard.json](https://github.com/cloudnative-pg/grafana-dashboards/blob/main/charts/cluster/grafana-dashboard.json)
file and manually importing it via the GUI.
!!! Warning
diff --git a/docs/src/recovery.md b/docs/src/recovery.md
index f7b0d59a74..b5c020cddc 100644
--- a/docs/src/recovery.md
+++ b/docs/src/recovery.md
@@ -170,7 +170,7 @@ spec:
we recommend that you:
1. Start with a single instance replica cluster. The primary instance will
- be recovered using the snapshot, and available WALs form the source cluster.
+ be recovered using the snapshot, and available WALs from the source cluster.
2. Take a snapshot of the primary in the replica cluster.
3. Increase the number of instances in the replica cluster as desired.
diff --git a/docs/src/release_notes/v1.21.md b/docs/src/release_notes/v1.21.md
index 02da86b7e9..b160c86a38 100644
--- a/docs/src/release_notes/v1.21.md
+++ b/docs/src/release_notes/v1.21.md
@@ -6,6 +6,56 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.21)
on the release branch in GitHub.
+## Version 1.21.4
+
+**Release date:** Mar 14, 2024
+
+### Enhancements
+
+- Allow customization of the `wal_level` GUC in PostgreSQL (#4020)
+- Add the `cnpg.io/skipWalArchiving` annotation to disable WAL archiving when
+ set to `enabled` (#4055)
+- Enrich the `cnpg` plugin for `kubectl` with the `publication` and
+ `subscription` command groups to imperatively set up PostgreSQL native
+ logical replication (#4052)
+- Allow customization of `CERTIFICATE_DURATION` and `EXPIRING_CHECK_THRESHOLD`
+ for automated management of TLS certificates handled by the operator (#3686)
+- Introduce initial support for tab-completion with the `cnpg` plugin for
+ `kubectl` (#3875)
+- Retrieve the correct architecture's binary from the corresponding catalog in
+ the running operator image during in-place updates, enabling the operator to
+ inject the correct binary into any Pod with a supported architecture (#3840)
+
+### Fixes
+
+- Properly synchronize PVC group labels with those on the pods, a critical
+ aspect when all pods are deleted and the operator needs to decide which Pod
+ to recreate first (#3930)
+- Disable `wal_sender_timeout` when cloning a replica to prevent timeout errors
+ due to slow connections (#4080)
+- Ensure that volume snapshots are ready before initiating recovery bootstrap
+ procedures, preventing an error condition where recovery with incomplete
+ backups could enter an error loop (#3663)
+- Prevent an error loop when unsetting connection limits in managed roles (#3832)
+- Resolve a corner case in hibernation where the instance pod has been deleted,
+ but the cluster status still has the hibernation condition set to false (#3970)
+- Correctly detect Google Cloud capabilities for Barman Cloud (#3931)
+
+### Security
+
+- Use `Role` instead of `ClusterRole` for operator permissions in OLM,
+ requiring fewer privileges when installed on a per-namespace basis (#3855,
+ #3990)
+- Enforce fully-qualified object names in SQL queries for the PgBouncer pooler
+ (#4080)
+
+### Changes
+
+- Follow Kubernetes recommendations to switch from client-side to server-side
+ application of manifests, requiring the `--server-side` option by default
+ when installing the operator (#3729).
+- Set the default operand image to PostgreSQL 16.2 (#3823).
+
## Version 1.21.3
**Release date:** Feb 2, 2024
diff --git a/docs/src/release_notes/v1.22.md b/docs/src/release_notes/v1.22.md
index 5f409d9e18..07b99d4434 100644
--- a/docs/src/release_notes/v1.22.md
+++ b/docs/src/release_notes/v1.22.md
@@ -6,6 +6,57 @@ For a complete list of changes, please refer to the
[commits](https://github.com/cloudnative-pg/cloudnative-pg/commits/release-1.22)
on the release branch in GitHub.
+## Version 1.22.2
+
+**Release date:** Mar 14, 2024
+
+### Enhancements
+
+- Allow customization of the `wal_level` GUC in PostgreSQL (#4020)
+- Add the `cnpg.io/skipWalArchiving` annotation to disable WAL archiving when
+ set to `enabled` (#4055)
+- Enrich the `cnpg` plugin for `kubectl` with the `publication` and
+ `subscription` command groups to imperatively set up PostgreSQL native
+ logical replication (#4052)
+- Allow customization of `CERTIFICATE_DURATION` and `EXPIRING_CHECK_THRESHOLD`
+ for automated management of TLS certificates handled by the operator (#3686)
+- Retrieve the correct architecture's binary from the corresponding catalog in
+ the running operator image during in-place updates, enabling the operator to
+ inject the correct binary into any Pod with a supported architecture (#3840)
+- Introduce initial support for tab-completion with the `cnpg` plugin for
+ `kubectl` (#3875)
+
+
+### Fixes
+
+- Properly synchronize PVC group labels with those on the pods, a critical
+ aspect when all pods are deleted and the operator needs to decide which Pod
+ to recreate first (#3930)
+- Disable `wal_sender_timeout` when cloning a replica to prevent timeout errors
+ due to slow connections (#4080)
+- Ensure that volume snapshots are ready before initiating recovery bootstrap
+ procedures, preventing an error condition where recovery with incomplete
+ backups could enter an error loop (#3663)
+- Prevent an error loop when unsetting connection limits in managed roles (#3832)
+- Resolve a corner case in hibernation where the instance pod has been deleted,
+ but the cluster status still has the hibernation condition set to false (#3970)
+- Correctly detect Google Cloud capabilities for Barman Cloud (#3931)
+
+### Security
+
+- Use `Role` instead of `ClusterRole` for operator permissions in OLM,
+ requiring fewer privileges when installed on a per-namespace basis (#3855,
+ #3990)
+- Enforce fully-qualified object names in SQL queries for the PgBouncer pooler
+ (#4080)
+
+### Changes
+
+- Follow Kubernetes recommendations to switch from client-side to server-side
+ application of manifests, requiring the `--server-side` option by default
+ when installing the operator (#3729).
+- Set the default operand image to PostgreSQL 16.2 (#3823).
+
## Version 1.22.1
**Release date:** Feb 2, 2024
diff --git a/docs/src/samples/cluster-example-bis-restore-cr.yaml b/docs/src/samples/cluster-example-bis-restore-cr.yaml
new file mode 100644
index 0000000000..4958cadc06
--- /dev/null
+++ b/docs/src/samples/cluster-example-bis-restore-cr.yaml
@@ -0,0 +1,26 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ instances: 3
+
+ storage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+ walStorage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+
+ bootstrap:
+ recovery:
+ volumeSnapshots:
+ storage:
+ name: cluster-example-20231031161103
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+ walStorage:
+ name: cluster-example-20231031161103-wal
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+
diff --git a/docs/src/samples/cluster-example-bis-restore.yaml b/docs/src/samples/cluster-example-bis-restore.yaml
new file mode 100644
index 0000000000..7f814a89fe
--- /dev/null
+++ b/docs/src/samples/cluster-example-bis-restore.yaml
@@ -0,0 +1,43 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-restore
+spec:
+ instances: 3
+ imageName: registry.dev:5000/postgresql:16
+
+ storage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+ walStorage:
+ size: 1Gi
+ storageClass: csi-hostpath-sc
+
+ # Backup properties
+ # This assumes a local minio setup
+# backup:
+# barmanObjectStore:
+# destinationPath: s3://backups/
+# endpointURL: http://minio:9000
+# s3Credentials:
+# accessKeyId:
+# name: minio
+# key: ACCESS_KEY_ID
+# secretAccessKey:
+# name: minio
+# key: ACCESS_SECRET_KEY
+# wal:
+# compression: gzip
+
+ bootstrap:
+ recovery:
+ volumeSnapshots:
+ storage:
+ name: snapshot-0bc6095db42768c7a1fe897494a966f541ef5fb29b2eb8e9399d80bd0a32408a-2023-11-13-7.41.53
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+ walStorage:
+ name: snapshot-a67084ba08097fd8c3e34c6afef8110091da67e5895f0379fd2df5b9f73ff524-2023-11-13-7.41.53
+ kind: VolumeSnapshot
+ apiGroup: snapshot.storage.k8s.io
+
diff --git a/docs/src/samples/cluster-example-bis.yaml b/docs/src/samples/cluster-example-bis.yaml
new file mode 100644
index 0000000000..a99b205f1b
--- /dev/null
+++ b/docs/src/samples/cluster-example-bis.yaml
@@ -0,0 +1,29 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example
+spec:
+ instances: 3
+ imageName: registry.dev:5000/postgresql:16
+
+ backup:
+ volumeSnapshot:
+ className: csi-hostpath-groupsnapclass
+ #className: csi-hostpath-snapclass
+ groupSnapshot: true
+
+ storage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+ walStorage:
+ storageClass: csi-hostpath-sc
+ size: 1Gi
+ # tablespaces:
+ # first:
+ # storage:
+ # storageClass: csi-hostpath-sc
+ # size: 1Gi
+ # second:
+ # storage:
+ # storageClass: csi-hostpath-sc
+ # size: 1Gi
diff --git a/docs/src/samples/cluster-example-logical-destination.yaml b/docs/src/samples/cluster-example-logical-destination.yaml
new file mode 100644
index 0000000000..75cb3f2af2
--- /dev/null
+++ b/docs/src/samples/cluster-example-logical-destination.yaml
@@ -0,0 +1,33 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example-dest
+spec:
+ instances: 1
+
+ storage:
+ size: 1Gi
+
+ bootstrap:
+ initdb:
+ import:
+ type: microservice
+ schemaOnly: true
+ databases:
+ - app
+ source:
+ externalCluster: cluster-example
+
+ externalClusters:
+ - name: cluster-example
+ connectionParameters:
+ host: cluster-example-rw.default.svc
+ # We're using the superuser to allow the publication to be
+ # created directly when connected to the target server.
+ # See cluster-example-logical-source.yaml for more information
+ # about this.
+ user: postgres
+ dbname: app
+ password:
+ name: cluster-example-superuser
+ key: password
diff --git a/docs/src/samples/cluster-example-logical-source.yaml b/docs/src/samples/cluster-example-logical-source.yaml
new file mode 100644
index 0000000000..ad9f888353
--- /dev/null
+++ b/docs/src/samples/cluster-example-logical-source.yaml
@@ -0,0 +1,32 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-example
+spec:
+ instances: 1
+
+ imageName: ghcr.io/cloudnative-pg/postgresql:13
+
+ storage:
+ size: 1Gi
+
+ bootstrap:
+ initdb:
+ postInitApplicationSQL:
+ - CREATE TABLE numbers (i SERIAL PRIMARY KEY, m INTEGER)
+ - INSERT INTO numbers (m) (SELECT generate_series(1,10000))
+ - ALTER TABLE numbers OWNER TO app;
+ - CREATE TABLE numbers_two (i SERIAL PRIMARY KEY, m INTEGER)
+ - INSERT INTO numbers_two (m) (SELECT generate_series(1,10000))
+ - ALTER TABLE numbers_two OWNER TO app;
+ - CREATE TABLE numbers_three (i SERIAL PRIMARY KEY, m INTEGER)
+ - INSERT INTO numbers_three (m) (SELECT generate_series(1,10000))
+ - ALTER TABLE numbers_three OWNER TO app;
+
+ enableSuperuserAccess: true
+
+ managed:
+ roles:
+ - name: app
+ login: true
+ replication: true
diff --git a/docs/src/samples/pooler-basic-auth.yaml b/docs/src/samples/pooler-basic-auth.yaml
index 257fbe6db5..a28ae33444 100644
--- a/docs/src/samples/pooler-basic-auth.yaml
+++ b/docs/src/samples/pooler-basic-auth.yaml
@@ -12,4 +12,4 @@ spec:
poolMode: session
authQuerySecret:
name: cluster-example-superuser
- authQuery: SELECT usename, passwd FROM pg_shadow WHERE usename=$1
+ authQuery: SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1
diff --git a/docs/src/ssl_connections.md b/docs/src/ssl_connections.md
index c5619ab82e..b813e67960 100644
--- a/docs/src/ssl_connections.md
+++ b/docs/src/ssl_connections.md
@@ -60,9 +60,11 @@ Certificate:
```
As you can see, TLS client certificates by default are created with 90 days of
-validity and with a simple CN that corresponds to the username in PostgreSQL.
-This is necessary to leverage the `cert` authentication method for `hostssl`
-entries in `pg_hba.conf`.
+validity, and with a simple CN that corresponds to the username in PostgreSQL.
+You can specify the validity and threshold values using the
+`EXPIRING_CHECK_THRESHOLD` and `CERTIFICATE_DURATION` parameters. This is
+necessary to leverage the `cert` authentication method for `hostssl` entries in
+`pg_hba.conf`.
## Testing the connection via a TLS certificate
diff --git a/docs/src/supported_releases.md b/docs/src/supported_releases.md
index 0e9ebd9c81..84f70fe7af 100644
--- a/docs/src/supported_releases.md
+++ b/docs/src/supported_releases.md
@@ -65,8 +65,8 @@ Git tags for versions are prepended with `v`.
| Version | Currently supported | Release date | End of life | Supported Kubernetes versions | Tested, but not supported | Supported Postgres versions |
|-----------------|----------------------|-------------------|---------------------|-------------------------------|---------------------------|-----------------------------|
-| 1.22.x | Yes | December 21, 2023 | ~ May 21, 2024 | 1.26, 1.27, 1.28 | 1.23, 1.24, 1.25 | 12 - 16 |
-| 1.21.x | Yes | October 12, 2023 | ~ April 12, 2024 | 1.25, 1.26, 1.27, 1.28 | 1.23, 1.24 | 12 - 16 |
+| 1.22.x | Yes | December 21, 2023 | ~ July/August, 2024 | 1.26, 1.27, 1.28 | 1.23, 1.24, 1.25 | 12 - 16 |
+| 1.21.x | Yes | October 12, 2023 | ~ May 23, 2024 | 1.25, 1.26, 1.27, 1.28 | 1.23, 1.24 | 12 - 16 |
| main | No, development only | | | | | 11 - 16 |
The list of supported Kubernetes versions in the table depends on what
@@ -98,8 +98,8 @@ version of PostgreSQL, we might not be able to help you.
| Version | Release date | End of life | Supported Kubernetes versions |
|-----------------|-----------------------|---------------------------|-------------------------------|
-| 1.23.0 | February 22, 2024 | - | - |
-| 1.24.0 | April 23, 2024 | - | - |
+| 1.23.0 | April 23, 2024 | - | - |
+| 1.24.0 | June/July, 2024 | - | - |
!!! Note
Feature freeze happens one week before the release
diff --git a/go.mod b/go.mod
index 784cb360f8..25ff8120a3 100644
--- a/go.mod
+++ b/go.mod
@@ -8,18 +8,21 @@ require (
github.com/avast/retry-go/v4 v4.5.1
github.com/blang/semver v3.5.1+incompatible
github.com/cheynewallace/tabby v1.1.1
+ github.com/cloudnative-pg/cnpg-i v0.0.0-20240301101346-b0b310788fa1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
+ github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.1
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/google/uuid v1.6.0
- github.com/jackc/pgx/v5 v5.5.4
+ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
+ github.com/jackc/pgx/v5 v5.5.5
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/kubernetes-csi/external-snapshotter/client/v7 v7.0.0
github.com/lib/pq v1.10.9
github.com/logrusorgru/aurora/v4 v4.0.0
github.com/mitchellh/go-ps v1.0.0
- github.com/onsi/ginkgo/v2 v2.16.0
- github.com/onsi/gomega v1.31.1
+ github.com/onsi/ginkgo/v2 v2.17.1
+ github.com/onsi/gomega v1.32.0
github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.72.0
github.com/prometheus/client_golang v1.19.0
github.com/robfig/cron v1.2.0
@@ -28,17 +31,19 @@ require (
github.com/spf13/pflag v1.0.5
github.com/thoas/go-funk v0.9.3
go.uber.org/atomic v1.11.0
+ go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
- golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
+ golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
golang.org/x/sys v0.18.0
+ google.golang.org/grpc v1.62.1
gopkg.in/yaml.v3 v3.0.1
- k8s.io/api v0.29.2
- k8s.io/apiextensions-apiserver v0.29.2
- k8s.io/apimachinery v0.29.2
- k8s.io/cli-runtime v0.29.2
- k8s.io/client-go v0.29.2
+ k8s.io/api v0.29.3
+ k8s.io/apiextensions-apiserver v0.29.3
+ k8s.io/apimachinery v0.29.3
+ k8s.io/cli-runtime v0.29.3
+ k8s.io/client-go v0.29.3
k8s.io/klog/v2 v2.120.1
- k8s.io/utils v0.0.0-20240102154912-e7106e64919e
+ k8s.io/utils v0.0.0-20240310230437-4693a0247e57
sigs.k8s.io/controller-runtime v0.17.2
sigs.k8s.io/yaml v1.4.0
)
@@ -49,7 +54,6 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
- github.com/evanphx/json-patch/v5 v5.8.0 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-logr/zapr v1.3.0 // indirect
@@ -59,7 +63,7 @@ require (
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/golang/protobuf v1.5.3 // indirect
+ github.com/golang/protobuf v1.5.4 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
@@ -90,21 +94,21 @@ require (
github.com/prometheus/procfs v0.12.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
- go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.19.0 // indirect
- golang.org/x/net v0.21.0 // indirect
+ golang.org/x/crypto v0.21.0 // indirect
+ golang.org/x/net v0.22.0 // indirect
golang.org/x/oauth2 v0.16.0 // indirect
golang.org/x/sync v0.6.0 // indirect
- golang.org/x/term v0.17.0 // indirect
+ golang.org/x/term v0.18.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
- golang.org/x/tools v0.18.0 // indirect
+ golang.org/x/tools v0.19.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
- google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/protobuf v1.32.0 // indirect
+ google.golang.org/appengine v1.6.8 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect
+ google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
- k8s.io/component-base v0.29.2 // indirect
+ k8s.io/component-base v0.29.3 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
diff --git a/go.sum b/go.sum
index 2915f952db..f3674eda0c 100644
--- a/go.sum
+++ b/go.sum
@@ -23,6 +23,8 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20240301101346-b0b310788fa1 h1:QDWsubYLQR8QDlj30ok9feh/u2ivamgtpiMsa7DLbqc=
+github.com/cloudnative-pg/cnpg-i v0.0.0-20240301101346-b0b310788fa1/go.mod h1:0G5GXQVj09KvONIcYURyroL74zOFGjv4eI5OXz7/G/0=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
@@ -37,8 +39,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro=
-github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
+github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
+github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
@@ -62,7 +64,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -71,8 +72,9 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
@@ -101,6 +103,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@@ -110,8 +114,8 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
-github.com/jackc/pgx/v5 v5.5.4 h1:Xp2aQS8uXButQdnCMWNmvx6UysWQQC+u1EoizjguY+8=
-github.com/jackc/pgx/v5 v5.5.4/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
+github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
+github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -157,10 +161,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
-github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
-github.com/onsi/gomega v1.31.1 h1:KYppCUK+bUgAZwHOu7EXVBKyQA6ILvOESHkn/tgoqvo=
-github.com/onsi/gomega v1.31.1/go.mod h1:y40C95dwAD1Nz36SsEnxvfFe8FFfNxzI5eJ0EYGyAy0=
+github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8=
+github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
+github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
+github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -211,6 +215,7 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY=
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
@@ -224,27 +229,30 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
+golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw=
+golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
+golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
@@ -253,6 +261,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -260,17 +269,23 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
-golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
+golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -283,8 +298,9 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
-golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
+golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -293,14 +309,18 @@ gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw
gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
-google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
+google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -311,8 +331,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
-google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@@ -328,24 +348,24 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A=
-k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0=
-k8s.io/apiextensions-apiserver v0.29.2 h1:UK3xB5lOWSnhaCk0RFZ0LUacPZz9RY4wi/yt2Iu+btg=
-k8s.io/apiextensions-apiserver v0.29.2/go.mod h1:aLfYjpA5p3OwtqNXQFkhJ56TB+spV8Gc4wfMhUA3/b8=
-k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8=
-k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU=
-k8s.io/cli-runtime v0.29.2 h1:smfsOcT4QujeghsNjECKN3lwyX9AwcFU0nvJ7sFN3ro=
-k8s.io/cli-runtime v0.29.2/go.mod h1:KLisYYfoqeNfO+MkTWvpqIyb1wpJmmFJhioA0xd4MW8=
-k8s.io/client-go v0.29.2 h1:FEg85el1TeZp+/vYJM7hkDlSTFZ+c5nnK44DJ4FyoRg=
-k8s.io/client-go v0.29.2/go.mod h1:knlvFZE58VpqbQpJNbCbctTVXcd35mMyAAwBdpt4jrA=
-k8s.io/component-base v0.29.2 h1:lpiLyuvPA9yV1aQwGLENYyK7n/8t6l3nn3zAtFTJYe8=
-k8s.io/component-base v0.29.2/go.mod h1:BfB3SLrefbZXiBfbM+2H1dlat21Uewg/5qtKOl8degM=
+k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
+k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
+k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI=
+k8s.io/apiextensions-apiserver v0.29.3/go.mod h1:po0XiY5scnpJfFizNGo6puNU6Fq6D70UJY2Cb2KwAVc=
+k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
+k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
+k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k=
+k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM=
+k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
+k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
+k8s.io/component-base v0.29.3 h1:Oq9/nddUxlnrCuuR2K/jp6aflVvc0uDvxMzAWxnGzAo=
+k8s.io/component-base v0.29.3/go.mod h1:Yuj33XXjuOk2BAaHsIGHhCKZQAgYKhqIxIjIr2UXYio=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
-k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20240310230437-4693a0247e57 h1:gbqbevonBh57eILzModw6mrkbwM0gQBEuevE/AaBsHY=
+k8s.io/utils v0.0.0-20240310230437-4693a0247e57/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0=
sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
diff --git a/hack/e2e/run-e2e-k3d.sh b/hack/e2e/run-e2e-k3d.sh
index d33f64c6d4..407c03b1d2 100755
--- a/hack/e2e/run-e2e-k3d.sh
+++ b/hack/e2e/run-e2e-k3d.sh
@@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e"
export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false}
export BUILD_IMAGE=${BUILD_IMAGE:-false}
-K3D_NODE_DEFAULT_VERSION=v1.29.1
+K3D_NODE_DEFAULT_VERSION=v1.29.2
export K8S_VERSION=${K8S_VERSION:-$K3D_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=k3d
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/install-config.yaml.template b/hack/install-config.yaml.template
index 84d7386562..840388b89b 100644
--- a/hack/install-config.yaml.template
+++ b/hack/install-config.yaml.template
@@ -22,7 +22,7 @@ networking:
hostPrefix: 23
machineNetwork:
- cidr: 10.0.0.0/16
- networkType: OpenShiftSDN
+ networkType: OVNKubernetes
serviceNetwork:
- 172.30.0.0/16
platform:
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index d4243141b5..123e84488b 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -25,7 +25,7 @@ fi
# Defaults
KIND_NODE_DEFAULT_VERSION=v1.29.2
-K3D_NODE_DEFAULT_VERSION=v1.29.1
+K3D_NODE_DEFAULT_VERSION=v1.29.2
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.11.0
EXTERNAL_SNAPSHOTTER_VERSION=v6.3.1
EXTERNAL_PROVISIONER_VERSION=v3.6.1
@@ -407,21 +407,12 @@ deploy_csi_host_path() {
deploy_pyroscope() {
- helm repo add pyroscope-io https://pyroscope-io.github.io/helm-chart
+ helm repo add pyroscope-io https://grafana.github.io/helm-charts
values_file="${TEMP_DIR}/pyroscope_values.yaml"
cat >"${values_file}" <<-EOF
pyroscopeConfigs:
log-level: "debug"
- scrape-configs:
- - job-name: cnpg
- enabled-profiles: [cpu, mem]
- static-configs:
- - application: cloudnative-pg
- targets:
- - cnpg-pprof:6060
- labels:
- cnpg: cnpg
EOF
helm -n cnpg-system install pyroscope pyroscope-io/pyroscope -f "${values_file}"
@@ -443,6 +434,22 @@ spec:
app.kubernetes.io/name: cloudnative-pg
EOF
kubectl -n cnpg-system apply -f "${service_file}"
+
+ annotations="${TEMP_DIR}/pyroscope_annotations.yaml"
+ cat >"${annotations}" <<- EOF
+spec:
+ template:
+ metadata:
+ annotations:
+ profiles.grafana.com/memory.scrape: "true"
+ profiles.grafana.com/memory.port: "6060"
+ profiles.grafana.com/cpu.scrape: "true"
+ profiles.grafana.com/cpu.port: "6060"
+ profiles.grafana.com/goroutine.scrape: "true"
+ profiles.grafana.com/goroutine.port: "6060"
+EOF
+
+ kubectl -n cnpg-system patch deployment cnpg-controller-manager --patch-file "${annotations}"
}
load_image_registry() {
@@ -503,8 +510,6 @@ Options:
-r|--registry Enable local registry. Env: ENABLE_REGISTRY
- -p|--pyroscope Enable Pyroscope in the operator namespace
-
To use long options you need to have GNU enhanced getopt available, otherwise
you can only use the short version of the options.
EOF
@@ -580,7 +585,7 @@ load() {
# to a future one, we build and push an image with a different VERSION
# to force a different hash for the manager binary.
# (Otherwise the ONLINE upgrade won't trigger)
-
+
echo "${bright}Building a 'prime' operator from current worktree${reset}"
PRIME_CONTROLLER_IMG="${CONTROLLER_IMG}-prime"
diff --git a/internal/cmd/manager/debug/architectures/cmd.go b/internal/cmd/manager/debug/architectures/cmd.go
new file mode 100644
index 0000000000..42b41b65fa
--- /dev/null
+++ b/internal/cmd/manager/debug/architectures/cmd.go
@@ -0,0 +1,63 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package architectures implements the show-architectures command
+package architectures
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// NewCmd creates the new cobra command
+func NewCmd() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "show-architectures",
+ Short: "Lists all the CPU architectures supported by this image",
+ RunE: func(_ *cobra.Command, _ []string) error {
+ if err := run(); err != nil {
+ log.Error(err, "Error while extracting the list of supported architectures")
+ return err
+ }
+
+ return nil
+ },
+ }
+
+ return &cmd
+}
+
+func run() error {
+ if err := utils.DetectAvailableArchitectures(); err != nil {
+ return err
+ }
+ availableArchitectures := utils.GetAvailableArchitectures()
+ architectures := make([]string, 0, len(availableArchitectures))
+ for _, arch := range availableArchitectures {
+ architectures = append(architectures, arch.GoArch)
+ }
+ val, err := json.MarshalIndent(architectures, "", " ")
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(val))
+ return nil
+}
diff --git a/internal/cmd/manager/debug/cmd.go b/internal/cmd/manager/debug/cmd.go
new file mode 100644
index 0000000000..e46fdbc735
--- /dev/null
+++ b/internal/cmd/manager/debug/cmd.go
@@ -0,0 +1,37 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package debug implements the debug command subfeatures
+package debug
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/manager/debug/architectures"
+)
+
+// NewCmd creates the new cobra command
+func NewCmd() *cobra.Command {
+ cmd := cobra.Command{
+ Use: "debug [cmd]",
+ Short: "Command aimed to gather useful debug data",
+ SilenceErrors: true,
+ }
+
+ cmd.AddCommand(architectures.NewCmd())
+
+ return &cmd
+}
diff --git a/internal/cmd/manager/instance/pgbasebackup/cmd.go b/internal/cmd/manager/instance/pgbasebackup/cmd.go
index cb27c59ddb..8d73ec4a45 100644
--- a/internal/cmd/manager/instance/pgbasebackup/cmd.go
+++ b/internal/cmd/manager/instance/pgbasebackup/cmd.go
@@ -128,6 +128,19 @@ func (env *CloneInfo) bootstrapUsingPgbasebackup(ctx context.Context) error {
return err
}
+ pgVersion, err := cluster.GetPostgresqlVersion()
+ if err != nil {
+ log.Warning(
+ "Error while parsing PostgreSQL server version to define connection options, defaulting to PostgreSQL 11",
+ "imageName", cluster.GetImageName(),
+ "err", err)
+ } else if pgVersion >= 120000 {
+ // We explicitly disable wal_sender_timeout for join-related pg_basebackup executions.
+ // A short timeout could not be enough in case the instance is slow to send data,
+ // like when the I/O is overloaded.
+ connectionString += " options='-c wal_sender_timeout=0s'"
+ }
+
err = postgres.ClonePgData(connectionString, env.info.PgData, env.info.PgWal)
if err != nil {
return err
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index 1f3e980594..bd3d1b31e5 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -123,7 +123,7 @@ func NewCmd() *cobra.Command {
}
cmd.Flags().StringVar(&podName, "pod-name", os.Getenv("POD_NAME"), "The name of the "+
"current pod in k8s")
- cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be created")
+ cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be used")
return &cmd
}
@@ -138,16 +138,6 @@ func run(
contextLog := log.FromContext(ctx)
walName := args[0]
- if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
- // Backup not configured, skipping WAL
- contextLog.Info("Backup not configured, skip WAL archiving",
- "walName", walName,
- "currentPrimary", cluster.Status.CurrentPrimary,
- "targetPrimary", cluster.Status.TargetPrimary,
- )
- return nil
- }
-
if cluster.Spec.ReplicaCluster != nil && cluster.Spec.ReplicaCluster.Enabled {
if podName != cluster.Status.CurrentPrimary && podName != cluster.Status.TargetPrimary {
contextLog.Debug("WAL archiving on a replica cluster, "+
@@ -169,9 +159,20 @@ func run(
return errSwitchoverInProgress
}
- maxParallel := 1
- if cluster.Spec.Backup.BarmanObjectStore.Wal != nil {
- maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel
+ // Request the plugins to archive this WAL
+ if err := archiveWALViaPlugins(ctx, cluster, path.Join(pgData, walName)); err != nil {
+ return err
+ }
+
+ // Request Barman Cloud to archive this WAL
+ if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
+ // Backup not configured, skipping WAL
+ contextLog.Info("Backup not configured, skip WAL archiving via Barman Cloud",
+ "walName", walName,
+ "currentPrimary", cluster.Status.CurrentPrimary,
+ "targetPrimary", cluster.Status.TargetPrimary,
+ )
+ return nil
}
// Get environment from cache
@@ -180,6 +181,11 @@ func run(
return fmt.Errorf("failed to get envs: %w", err)
}
+ maxParallel := 1
+ if cluster.Spec.Backup.BarmanObjectStore.Wal != nil {
+ maxParallel = cluster.Spec.Backup.BarmanObjectStore.Wal.MaxParallel
+ }
+
// Create the archiver
var walArchiver *archiver.WALArchiver
if walArchiver, err = archiver.New(ctx, cluster, env, SpoolDirectory, pgData); err != nil {
@@ -234,6 +240,26 @@ func run(
return walStatus[0].Err
}
+// archiveWALViaPlugins requests every capable plugin to archive the passed
+// WAL file, and returns an error if a configured plugin fails to do so.
+// It will not return an error if there's no plugin capable of WAL archiving
+func archiveWALViaPlugins(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ walName string,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ pluginClient, err := cluster.LoadSelectedPluginsClient(ctx, cluster.GetWALPluginNames())
+ if err != nil {
+ contextLogger.Error(err, "Error loading plugins while archiving a WAL")
+ return err
+ }
+ defer pluginClient.Close(ctx)
+
+ return pluginClient.ArchiveWAL(ctx, cluster, walName)
+}
+
// gatherWALFilesToArchive reads from the archived status the list of WAL files
// that can be archived in parallel way.
// `requestedWALFile` is the name of the file whose archiving was requested by
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index e43e0de9c8..50a591b9f8 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -22,6 +22,7 @@ import (
"errors"
"fmt"
"os"
+ "path"
"strings"
"time"
@@ -58,6 +59,7 @@ const (
// NewCmd creates a new cobra command
func NewCmd() *cobra.Command {
var podName string
+ var pgData string
cmd := cobra.Command{
Use: "wal-restore [name]",
@@ -66,7 +68,7 @@ func NewCmd() *cobra.Command {
RunE: func(cobraCmd *cobra.Command, args []string) error {
contextLog := log.WithName("wal-restore")
ctx := log.IntoContext(cobraCmd.Context(), contextLog)
- err := run(ctx, podName, args)
+ err := run(ctx, pgData, podName, args)
if err == nil {
return nil
}
@@ -93,11 +95,12 @@ func NewCmd() *cobra.Command {
cmd.Flags().StringVar(&podName, "pod-name", os.Getenv("POD_NAME"), "The name of the "+
"current pod in k8s")
+ cmd.Flags().StringVar(&pgData, "pg-data", os.Getenv("PGDATA"), "The PGDATA to be used")
return &cmd
}
-func run(ctx context.Context, podName string, args []string) error {
+func run(ctx context.Context, pgData string, podName string, args []string) error {
contextLog := log.FromContext(ctx)
startTime := time.Now()
walName := args[0]
@@ -111,6 +114,10 @@ func run(ctx context.Context, podName string, args []string) error {
return fmt.Errorf("failed to get cluster: %w", err)
}
+ if err := restoreWALViaPlugins(ctx, cluster, walName, path.Join(pgData, destinationPath)); err != nil {
+ return err
+ }
+
recoverClusterName, recoverEnv, barmanConfiguration, err := GetRecoverConfiguration(cluster, podName)
if errors.Is(err, ErrNoBackupConfigured) {
// Backup not configured, skipping WAL
@@ -225,6 +232,27 @@ func run(ctx context.Context, podName string, args []string) error {
return nil
}
+// restoreWALViaPlugins requests every capable plugin to restore the passed
+// WAL file, and returns an error if every plugin failed. It will not return
+// an error if there's no plugin capable of WAL restoration
+func restoreWALViaPlugins(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ walName string,
+ destinationPathName string,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ pluginClient, err := cluster.LoadSelectedPluginsClient(ctx, cluster.GetWALPluginNames())
+ if err != nil {
+ contextLogger.Error(err, "Error loading plugins while restoring a WAL")
+ return err
+ }
+ defer pluginClient.Close(ctx)
+
+ return pluginClient.RestoreWAL(ctx, cluster, walName, destinationPathName)
+}
+
// checkEndOfWALStreamFlag returns ErrEndOfWALStreamReached if the flag is set in the restorer
func checkEndOfWALStreamFlag(walRestorer *restorer.WALRestorer) error {
contain, err := walRestorer.IsEndOfWALStream()
diff --git a/internal/cmd/plugin/backup/cmd.go b/internal/cmd/plugin/backup/cmd.go
index e942f8e05b..ee1cbc602a 100644
--- a/internal/cmd/plugin/backup/cmd.go
+++ b/internal/cmd/plugin/backup/cmd.go
@@ -63,7 +63,10 @@ func NewCmd() *cobra.Command {
backupSubcommand := &cobra.Command{
Use: "backup [cluster]",
Short: "Request an on-demand backup for a PostgreSQL Cluster",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
diff --git a/internal/cmd/plugin/certificate/cmd.go b/internal/cmd/plugin/certificate/cmd.go
index 12d90d0125..e1359baf06 100644
--- a/internal/cmd/plugin/certificate/cmd.go
+++ b/internal/cmd/plugin/certificate/cmd.go
@@ -32,7 +32,7 @@ func NewCmd() *cobra.Command {
Long: `This command creates a new Kubernetes secret containing the crypto-material.
This is needed to configure TLS with Certificate authentication access for an application to
connect to the PostgreSQL cluster.`,
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
RunE: func(cmd *cobra.Command, args []string) error {
ctx := context.Background()
secretName := args[0]
diff --git a/internal/cmd/plugin/fio/cmd.go b/internal/cmd/plugin/fio/cmd.go
index 2f872597fb..7c53e97aad 100644
--- a/internal/cmd/plugin/fio/cmd.go
+++ b/internal/cmd/plugin/fio/cmd.go
@@ -31,7 +31,7 @@ func NewCmd() *cobra.Command {
fioCmd := &cobra.Command{
Use: "fio [name]",
- Short: "Creates a fio deployment,pvc and configmap.",
+ Short: "Creates a fio deployment, pvc and configmap",
Args: cobra.MinimumNArgs(1),
Long: `Creates a fio deployment that will execute a fio job on the specified pvc.`,
Example: jobExample,
diff --git a/internal/cmd/plugin/hibernate/cmd.go b/internal/cmd/plugin/hibernate/cmd.go
index 688d858d50..c13a8028cd 100644
--- a/internal/cmd/plugin/hibernate/cmd.go
+++ b/internal/cmd/plugin/hibernate/cmd.go
@@ -28,7 +28,10 @@ var (
hibernateOnCmd = &cobra.Command{
Use: "on [cluster]",
Short: "Hibernates the cluster named [cluster]",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
force, err := cmd.Flags().GetBool("force")
@@ -48,7 +51,10 @@ var (
hibernateOffCmd = &cobra.Command{
Use: "off [cluster]",
Short: "Bring the cluster named [cluster] back from hibernation",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
off := newOffCommand(cmd.Context(), clusterName)
@@ -59,7 +65,10 @@ var (
hibernateStatusCmd = &cobra.Command{
Use: "status [cluster]",
Short: "Prints the hibernation status for the [cluster]",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
rawOutput, err := cmd.Flags().GetString("output")
diff --git a/internal/cmd/plugin/logical/database.go b/internal/cmd/plugin/logical/database.go
new file mode 100644
index 0000000000..d9f388a2ba
--- /dev/null
+++ b/internal/cmd/plugin/logical/database.go
@@ -0,0 +1,67 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logical
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/lib/pq"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+)
+
+// GetApplicationDatabaseName gets the application database name for
+// a cluster with a given name
+func GetApplicationDatabaseName(ctx context.Context, clusterName string) (string, error) {
+ var cluster apiv1.Cluster
+ err := plugin.Client.Get(
+ ctx,
+ client.ObjectKey{
+ Namespace: plugin.Namespace,
+ Name: clusterName,
+ },
+ &cluster,
+ )
+ if err != nil {
+ return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace)
+ }
+
+ return cluster.GetApplicationDatabaseName(), nil
+}
+
+// GetSubscriptionConnInfo gets the connection string a subscription is connected to
+func GetSubscriptionConnInfo(
+ ctx context.Context,
+ clusterName string,
+ connectionString string,
+ subscriptionName string,
+) (string, error) {
+ sqlCommand := fmt.Sprintf(
+ "SELECT subconninfo FROM pg_catalog.pg_subscription WHERE subname=%s",
+ pq.QuoteLiteral(subscriptionName),
+ )
+ output, err := RunSQLWithOutput(
+ ctx,
+ clusterName,
+ connectionString,
+ sqlCommand,
+ )
+ return string(output), err
+}
diff --git a/internal/cmd/plugin/logical/doc.go b/internal/cmd/plugin/logical/doc.go
new file mode 100644
index 0000000000..e0b57900fd
--- /dev/null
+++ b/internal/cmd/plugin/logical/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package logical contains the common features of the
+// publication/subscription management
+package logical
diff --git a/internal/cmd/plugin/logical/externalcluster.go b/internal/cmd/plugin/logical/externalcluster.go
new file mode 100644
index 0000000000..ca136ae478
--- /dev/null
+++ b/internal/cmd/plugin/logical/externalcluster.go
@@ -0,0 +1,57 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logical
+
+import (
+ "context"
+ "fmt"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/external"
+)
+
+// GetConnectionString gets the connection string to be used to connect to
+// the specified external cluster, while connected to a pod of the specified
+// cluster.
+func GetConnectionString(
+ ctx context.Context,
+ clusterName string,
+ externalClusterName string,
+) (string, error) {
+ var cluster apiv1.Cluster
+ err := plugin.Client.Get(
+ ctx,
+ client.ObjectKey{
+ Namespace: plugin.Namespace,
+ Name: clusterName,
+ },
+ &cluster,
+ )
+ if err != nil {
+ return "", fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace)
+ }
+
+ externalCluster, ok := cluster.ExternalCluster(externalClusterName)
+ if !ok {
+ return "", fmt.Errorf("external cluster does not exist in the cluster definition")
+ }
+
+ return external.GetServerConnectionString(&externalCluster), nil
+}
diff --git a/internal/cmd/plugin/logical/psql.go b/internal/cmd/plugin/logical/psql.go
new file mode 100644
index 0000000000..3c4b1c1670
--- /dev/null
+++ b/internal/cmd/plugin/logical/psql.go
@@ -0,0 +1,81 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logical
+
+import (
+ "context"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/psql"
+)
+
+// RunSQL execs a SQL statement while connected via `psql`
+// to a Pod of a cluster, targeting a passed connection string
+func RunSQL(
+ ctx context.Context,
+ clusterName string,
+ connectionString string,
+ sqlCommand string,
+) error {
+ cmd, err := getSQLCommand(ctx, clusterName, connectionString, sqlCommand, "-qAt")
+ if err != nil {
+ return err
+ }
+
+ return cmd.Run()
+}
+
+// RunSQLWithOutput execs a SQL statement while connected via `psql`
+// to a Pod of a cluster, targeting a passed connection string
+func RunSQLWithOutput(
+ ctx context.Context,
+ clusterName string,
+ connectionString string,
+ sqlCommand string,
+) ([]byte, error) {
+ cmd, err := getSQLCommand(ctx, clusterName, connectionString, sqlCommand, "-qAt")
+ if err != nil {
+ return nil, err
+ }
+
+ return cmd.Output()
+}
+
+func getSQLCommand(
+ ctx context.Context,
+ clusterName string,
+ connectionString string,
+ sqlCommand string,
+ args ...string,
+) (*psql.Command, error) {
+ psqlArgs := []string{
+ connectionString,
+ "-c",
+ sqlCommand,
+ }
+ psqlArgs = append(psqlArgs, args...)
+ psqlOptions := psql.CommandOptions{
+ Replica: false,
+ Namespace: plugin.Namespace,
+ AllocateTTY: false,
+ PassStdin: false,
+ Args: psqlArgs,
+ Name: clusterName,
+ }
+
+ return psql.NewCommand(ctx, psqlOptions)
+}
diff --git a/internal/cmd/plugin/logical/publication/cmd.go b/internal/cmd/plugin/logical/publication/cmd.go
new file mode 100644
index 0000000000..0409a2b04e
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/cmd.go
@@ -0,0 +1,36 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package publication
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/create"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/publication/drop"
+)
+
+// NewCmd initializes the publication command
+func NewCmd() *cobra.Command {
+ publicationCmd := &cobra.Command{
+ Use: "publication",
+ Short: "Logical publication management commands",
+ }
+ publicationCmd.AddCommand(create.NewCmd())
+ publicationCmd.AddCommand(drop.NewCmd())
+
+ return publicationCmd
+}
diff --git a/internal/cmd/plugin/logical/publication/create/cmd.go b/internal/cmd/plugin/logical/publication/create/cmd.go
new file mode 100644
index 0000000000..885fa9ea7d
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/create/cmd.go
@@ -0,0 +1,172 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+// NewCmd initializes the publication create command
+func NewCmd() *cobra.Command {
+ var dbName string
+ var allTables bool
+ var schemaNames []string
+ var tableExprs []string
+ var publicationName string
+ var externalClusterName string
+ var publicationParameters string
+ var dryRun bool
+
+ publicationCreateCmd := &cobra.Command{
+ Use: "create cluster_name",
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ Short: "create a logical replication publication",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ dbName := strings.TrimSpace(dbName)
+ publicationName := strings.TrimSpace(publicationName)
+ clusterName := args[0]
+
+ if len(dbName) == 0 {
+ var err error
+ dbName, err = logical.GetApplicationDatabaseName(cmd.Context(), clusterName)
+ if err != nil {
+ return err
+ }
+ }
+ if len(dbName) == 0 {
+ return fmt.Errorf(
+ "the name of the database was not specified and there is no available application database")
+ }
+
+ sqlCommandBuilder := PublicationCmdBuilder{
+ PublicationName: publicationName,
+ PublicationParameters: publicationParameters,
+ }
+
+ if allTables {
+ sqlCommandBuilder.PublicationTarget = &PublicationTargetALLTables{}
+ } else {
+ targets := &PublicationTargetPublicationObjects{}
+ for _, schemaName := range schemaNames {
+ targets.PublicationObjects = append(
+ targets.PublicationObjects,
+ &PublicationObjectSchema{
+ SchemaName: schemaName,
+ },
+ )
+ }
+
+ if len(tableExprs) > 0 {
+ targets.PublicationObjects = append(
+ targets.PublicationObjects,
+ &PublicationObjectTableExpression{
+ TableExpressions: tableExprs,
+ },
+ )
+ }
+ sqlCommandBuilder.PublicationTarget = targets
+ }
+
+ target := dbName
+ if len(externalClusterName) > 0 {
+ var err error
+ target, err = logical.GetConnectionString(cmd.Context(), clusterName, externalClusterName)
+ if err != nil {
+ return err
+ }
+ }
+
+ sqlCommand := sqlCommandBuilder.ToSQL()
+ fmt.Println(sqlCommand)
+ if dryRun {
+ return nil
+ }
+
+ return logical.RunSQL(cmd.Context(), clusterName, target, sqlCommand)
+ },
+ }
+
+ publicationCreateCmd.Flags().StringVar(
+ &dbName,
+ "dbname",
+ "",
+ "The database in which the command should create the publication "+
+ "(defaults to the name of the application database)",
+ )
+
+ publicationCreateCmd.Flags().StringVar(
+ &publicationName,
+ "publication",
+ "",
+ "The name of the publication to be created (required)",
+ )
+ _ = publicationCreateCmd.MarkFlagRequired("publication")
+
+ publicationCreateCmd.Flags().BoolVar(
+ &allTables,
+ "all-tables",
+ false,
+ "Create the publication for all the tables in the database or in the schema",
+ )
+ publicationCreateCmd.Flags().StringSliceVar(
+ &schemaNames,
+ "schema",
+ nil,
+ "Create the publication for all the tables in the selected schema",
+ )
+ publicationCreateCmd.Flags().StringSliceVar(
+ &tableExprs,
+ "table",
+ nil,
+ "Create the publication for the selected table expression",
+ )
+ publicationCreateCmd.MarkFlagsOneRequired("all-tables", "schema", "table")
+ publicationCreateCmd.MarkFlagsMutuallyExclusive("all-tables", "schema")
+ publicationCreateCmd.MarkFlagsMutuallyExclusive("all-tables", "table")
+
+ publicationCreateCmd.Flags().StringVar(
+ &externalClusterName,
+ "external-cluster",
+ "",
+ "The cluster in which to create the publication. Defaults to the local cluster",
+ )
+ publicationCreateCmd.Flags().BoolVar(
+ &dryRun,
+ "dry-run",
+ false,
+ "If specified, the publication commands are shown but not executed",
+ )
+
+ publicationCreateCmd.Flags().StringVar(
+ &publicationParameters,
+ "parameters",
+ "",
+ "The publication parameters. IMPORTANT: this command won't perform any validation. "+
+ "Users are responsible for passing them correctly",
+ )
+
+ return publicationCreateCmd
+}
diff --git a/internal/cmd/plugin/logical/publication/create/doc.go b/internal/cmd/plugin/logical/publication/create/doc.go
new file mode 100644
index 0000000000..5717d3abdf
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/create/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package create contains the implementation of the kubectl cnpg publication create command
+package create
diff --git a/internal/cmd/plugin/logical/publication/create/publication.go b/internal/cmd/plugin/logical/publication/create/publication.go
new file mode 100644
index 0000000000..9a6be06362
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/create/publication.go
@@ -0,0 +1,114 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+)
+
+// PublicationCmdBuilder represents a command to create a publication
+type PublicationCmdBuilder struct {
+ // The name of the publication to be created
+ PublicationName string
+
+ // The target of the publication
+ PublicationTarget PublicationTarget
+
+ // The optional publication parameters
+ PublicationParameters string
+}
+
+// ToSQL creates the SQL statement to create the publication
+func (cmd PublicationCmdBuilder) ToSQL() string {
+ result := fmt.Sprintf(
+ "CREATE PUBLICATION %s %s",
+ pgx.Identifier{cmd.PublicationName}.Sanitize(),
+ cmd.PublicationTarget.ToPublicationTargetSQL(),
+ )
+
+ if len(cmd.PublicationParameters) > 0 {
+ result = fmt.Sprintf("%s WITH (%s)", result, cmd.PublicationParameters)
+ }
+
+ return result
+}
+
+// PublicationTarget represents the publication target
+type PublicationTarget interface {
+ // Create the SQL statement to publish the tables
+ ToPublicationTargetSQL() string
+}
+
+// PublicationTargetALLTables will publish all tables
+type PublicationTargetALLTables struct{}
+
+// ToPublicationTargetSQL implements the PublicationTarget interface
+func (PublicationTargetALLTables) ToPublicationTargetSQL() string {
+ return "FOR ALL TABLES"
+}
+
+// PublicationTargetPublicationObjects publishes multiple publication objects
+type PublicationTargetPublicationObjects struct {
+ PublicationObjects []PublicationObject
+}
+
+// ToPublicationTargetSQL implements the PublicationTarget interface
+func (objs *PublicationTargetPublicationObjects) ToPublicationTargetSQL() string {
+ result := ""
+ for _, object := range objs.PublicationObjects {
+ if len(result) > 0 {
+ result += ", "
+ }
+ result += object.ToPublicationObjectSQL()
+ }
+
+ if len(result) > 0 {
+ result = fmt.Sprintf("FOR %s", result)
+ }
+ return result
+}
+
+// PublicationObject represents an object to publish
+type PublicationObject interface {
+ // Create the SQL statement to publish this object
+ ToPublicationObjectSQL() string
+}
+
+// PublicationObjectSchema will publish all the tables in a certain schema
+type PublicationObjectSchema struct {
+ // The schema to publish
+ SchemaName string
+}
+
+// ToPublicationObjectSQL implements the PublicationObject interface
+func (obj PublicationObjectSchema) ToPublicationObjectSQL() string {
+ return fmt.Sprintf("TABLES IN SCHEMA %s", pgx.Identifier{obj.SchemaName}.Sanitize())
+}
+
+// PublicationObjectTableExpression will publish the passed table expressions
+type PublicationObjectTableExpression struct {
+ // The table expressions to publish
+ TableExpressions []string
+}
+
+// ToPublicationObjectSQL implements the PublicationObject interface
+func (obj PublicationObjectTableExpression) ToPublicationObjectSQL() string {
+ return fmt.Sprintf("TABLE %s", strings.Join(obj.TableExpressions, ", "))
+}
diff --git a/internal/cmd/plugin/logical/publication/create/publication_test.go b/internal/cmd/plugin/logical/publication/create/publication_test.go
new file mode 100644
index 0000000000..60081a0b0e
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/create/publication_test.go
@@ -0,0 +1,99 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("create publication SQL generator", func() {
+ It("can publish all tables", func() {
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: PublicationTargetALLTables{},
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES`))
+ })
+
+ It("can publish all tables with custom parameters", func() {
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: PublicationTargetALLTables{},
+ PublicationParameters: "publish='insert'",
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR ALL TABLES WITH (publish='insert')`))
+ })
+
+ It("can publish a list of tables via multiple publication objects", func() {
+ // This is supported from PG 15
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: &PublicationTargetPublicationObjects{
+ PublicationObjects: []PublicationObject{
+ PublicationObjectTableExpression{
+ TableExpressions: []string{"a"},
+ },
+ PublicationObjectTableExpression{
+ TableExpressions: []string{"b"},
+ },
+ },
+ },
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLE a, TABLE b`))
+ })
+
+ It("can publish a list of tables via multiple table expressions", func() {
+ // This is supported in PG < 15
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: &PublicationTargetPublicationObjects{
+ PublicationObjects: []PublicationObject{
+ PublicationObjectTableExpression{
+ TableExpressions: []string{"a", "b"},
+ },
+ },
+ },
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLE a, b`))
+ })
+
+ It("can publish a schema via multiple publication objects", func() {
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: &PublicationTargetPublicationObjects{
+ PublicationObjects: []PublicationObject{
+ PublicationObjectSchema{
+ SchemaName: "public",
+ },
+ },
+ },
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLES IN SCHEMA "public"`))
+ })
+
+ It("can publish multiple schemas via multiple publication objects", func() {
+ Expect(PublicationCmdBuilder{
+ PublicationName: "app",
+ PublicationTarget: &PublicationTargetPublicationObjects{
+ PublicationObjects: []PublicationObject{
+ PublicationObjectSchema{
+ SchemaName: "public",
+ },
+ PublicationObjectSchema{
+ SchemaName: "next",
+ },
+ },
+ },
+ }.ToSQL()).To(Equal(`CREATE PUBLICATION "app" FOR TABLES IN SCHEMA "public", TABLES IN SCHEMA "next"`))
+ })
+})
diff --git a/internal/cmd/plugin/logical/publication/create/suite_test.go b/internal/cmd/plugin/logical/publication/create/suite_test.go
new file mode 100644
index 0000000000..b87263eec2
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/create/suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestCreatePublication(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Create publication subcommand test suite")
+}
diff --git a/internal/cmd/plugin/logical/publication/doc.go b/internal/cmd/plugin/logical/publication/doc.go
new file mode 100644
index 0000000000..f83e7f3303
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package publication contains the implementation of the kubectl cnpg publication command
+package publication
diff --git a/internal/cmd/plugin/logical/publication/drop/cmd.go b/internal/cmd/plugin/logical/publication/drop/cmd.go
new file mode 100644
index 0000000000..4d0d513b6e
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/drop/cmd.go
@@ -0,0 +1,110 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package drop
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+// NewCmd initializes the publication drop command
+func NewCmd() *cobra.Command {
+ var publicationName string
+ var dbName string
+ var externalClusterName string
+ var dryRun bool
+
+ publicationDropCmd := &cobra.Command{
+ Use: "drop cluster_name",
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ Short: "drop a logical replication publication",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ publicationName := strings.TrimSpace(publicationName)
+ clusterName := args[0]
+
+ if len(dbName) == 0 {
+ var err error
+ dbName, err = logical.GetApplicationDatabaseName(cmd.Context(), clusterName)
+ if err != nil {
+ return err
+ }
+ }
+ if len(dbName) == 0 {
+ return fmt.Errorf(
+ "the name of the database was not specified and there is no available application database")
+ }
+
+ sqlCommand := fmt.Sprintf(
+ "DROP PUBLICATION %s",
+ pgx.Identifier{publicationName}.Sanitize(),
+ )
+ fmt.Println(sqlCommand)
+ if dryRun {
+ return nil
+ }
+
+ target := dbName
+ if len(externalClusterName) > 0 {
+ var err error
+ target, err = logical.GetConnectionString(cmd.Context(), clusterName, externalClusterName)
+ if err != nil {
+ return err
+ }
+ }
+
+ return logical.RunSQL(cmd.Context(), clusterName, target, sqlCommand)
+ },
+ }
+
+ publicationDropCmd.Flags().StringVar(
+ &publicationName,
+ "publication",
+ "",
+ "The name of the publication to be dropped (required)",
+ )
+ _ = publicationDropCmd.MarkFlagRequired("publication")
+
+ publicationDropCmd.Flags().StringVar(
+ &dbName,
+ "dbname",
+ "",
+ "The database in which the command should drop the publication",
+ )
+ publicationDropCmd.Flags().StringVar(
+ &externalClusterName,
+ "external-cluster",
+ "",
+ "The cluster where to drop the publication. Defaults to the local cluster",
+ )
+ publicationDropCmd.Flags().BoolVar(
+ &dryRun,
+ "dry-run",
+ false,
+ "If specified, the publication deletion commands are shown but not executed",
+ )
+
+ return publicationDropCmd
+}
diff --git a/internal/cmd/plugin/logical/publication/drop/doc.go b/internal/cmd/plugin/logical/publication/drop/doc.go
new file mode 100644
index 0000000000..42d91045fb
--- /dev/null
+++ b/internal/cmd/plugin/logical/publication/drop/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package drop contains the implementation of the kubectl cnpg publication drop command
+package drop
diff --git a/internal/cmd/plugin/logical/subscription/cmd.go b/internal/cmd/plugin/logical/subscription/cmd.go
new file mode 100644
index 0000000000..7c46b96fc7
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/cmd.go
@@ -0,0 +1,38 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package subscription
+
+import (
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/create"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/drop"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical/subscription/syncsequences"
+)
+
+// NewCmd initializes the subscription command
+func NewCmd() *cobra.Command {
+ subscriptionCmd := &cobra.Command{
+ Use: "subscription",
+ Short: "Logical subscription management commands",
+ }
+ subscriptionCmd.AddCommand(create.NewCmd())
+ subscriptionCmd.AddCommand(drop.NewCmd())
+ subscriptionCmd.AddCommand(syncsequences.NewCmd())
+
+ return subscriptionCmd
+}
diff --git a/internal/cmd/plugin/logical/subscription/create/cmd.go b/internal/cmd/plugin/logical/subscription/create/cmd.go
new file mode 100644
index 0000000000..0ea1daacea
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/create/cmd.go
@@ -0,0 +1,131 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+// NewCmd initializes the subscription create command
+func NewCmd() *cobra.Command {
+ var externalClusterName string
+ var publicationName string
+ var subscriptionName string
+ var dbName string
+ var parameters string
+ var dryRun bool
+
+ subscriptionCreateCmd := &cobra.Command{
+ Use: "create cluster_name",
+ Short: "create a logical replication subscription",
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clusterName := args[0]
+ externalClusterName := strings.TrimSpace(externalClusterName)
+ publicationName := strings.TrimSpace(publicationName)
+ subscriptionName := strings.TrimSpace(subscriptionName)
+ dbName := strings.TrimSpace(dbName)
+
+ if len(dbName) == 0 {
+ var err error
+ dbName, err = logical.GetApplicationDatabaseName(cmd.Context(), clusterName)
+ if err != nil {
+ return err
+ }
+ }
+ if len(dbName) == 0 {
+ return fmt.Errorf(
+ "the name of the database was not specified and there is no available application database")
+ }
+
+ connectionString, err := logical.GetConnectionString(cmd.Context(), clusterName, externalClusterName)
+ if err != nil {
+ return err
+ }
+
+ createCmd := SubscriptionCmdBuilder{
+ SubscriptionName: subscriptionName,
+ PublicationName: publicationName,
+ ConnectionString: connectionString,
+ Parameters: parameters,
+ }
+ sqlCommand := createCmd.ToSQL()
+
+ fmt.Println(sqlCommand)
+ if dryRun {
+ return nil
+ }
+
+ return logical.RunSQL(cmd.Context(), clusterName, dbName, sqlCommand)
+ },
+ }
+
+ subscriptionCreateCmd.Flags().StringVar(
+ &externalClusterName,
+ "external-cluster",
+ "",
+ "The external cluster name (required)",
+ )
+ _ = subscriptionCreateCmd.MarkFlagRequired("external-cluster")
+
+ subscriptionCreateCmd.Flags().StringVar(
+ &publicationName,
+ "publication",
+ "",
+ "The name of the publication to subscribe to (required)",
+ )
+ _ = subscriptionCreateCmd.MarkFlagRequired("publication")
+
+ subscriptionCreateCmd.Flags().StringVar(
+ &subscriptionName,
+ "subscription",
+ "",
+ "The name of the subscription to create (required)",
+ )
+ _ = subscriptionCreateCmd.MarkFlagRequired("subscription")
+
+ subscriptionCreateCmd.Flags().StringVar(
+ &dbName,
+ "dbname",
+ "",
+		"The name of the database where to create the subscription. Defaults to the application database if available",
+ )
+ subscriptionCreateCmd.Flags().StringVar(
+ ¶meters,
+ "parameters",
+ "",
+ "The subscription parameters. IMPORTANT: this command won't perform any validation. "+
+ "Users are responsible for passing them correctly",
+ )
+ subscriptionCreateCmd.Flags().BoolVar(
+ &dryRun,
+ "dry-run",
+ false,
+ "If specified, the subscription commands are shown but not executed",
+ )
+
+ return subscriptionCreateCmd
+}
diff --git a/internal/cmd/plugin/logical/subscription/create/doc.go b/internal/cmd/plugin/logical/subscription/create/doc.go
new file mode 100644
index 0000000000..a2918f60e6
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/create/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package create contains the implementation of the kubectl cnpg subscription create command
+package create
diff --git a/internal/cmd/plugin/logical/subscription/create/subscription.go b/internal/cmd/plugin/logical/subscription/create/subscription.go
new file mode 100644
index 0000000000..f545364116
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/create/subscription.go
@@ -0,0 +1,55 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+ "fmt"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/lib/pq"
+)
+
+// SubscriptionCmdBuilder represent a command to create a subscription
+type SubscriptionCmdBuilder struct {
+	// The name of the subscription to be created
+ SubscriptionName string
+
+ // The connection to the source database
+ ConnectionString string
+
+ // The name of the publication to attach to
+ PublicationName string
+
+ // Custom subscription parameters
+ Parameters string
+}
+
+// ToSQL creates the SQL statement to create the subscription
+func (cmd SubscriptionCmdBuilder) ToSQL() string {
+ result := fmt.Sprintf(
+ "CREATE SUBSCRIPTION %s CONNECTION %s PUBLICATION %s",
+ pgx.Identifier{cmd.SubscriptionName}.Sanitize(),
+ pq.QuoteLiteral(cmd.ConnectionString),
+ cmd.PublicationName,
+ )
+
+ if len(cmd.Parameters) > 0 {
+ result = fmt.Sprintf("%s WITH (%s)", result, cmd.Parameters)
+ }
+
+ return result
+}
diff --git a/internal/cmd/plugin/logical/subscription/doc.go b/internal/cmd/plugin/logical/subscription/doc.go
new file mode 100644
index 0000000000..63b527ec2e
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package subscription contains the implementation of the kubectl cnpg subscription command
+package subscription
diff --git a/internal/cmd/plugin/logical/subscription/drop/cmd.go b/internal/cmd/plugin/logical/subscription/drop/cmd.go
new file mode 100644
index 0000000000..bba02c68bc
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/drop/cmd.go
@@ -0,0 +1,96 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package drop
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+// NewCmd initializes the subscription drop command
+func NewCmd() *cobra.Command {
+ var subscriptionName string
+ var dbName string
+ var dryRun bool
+
+ subscriptionDropCmd := &cobra.Command{
+ Use: "drop cluster_name",
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ Short: "drop a logical replication subscription",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ subscriptionName := strings.TrimSpace(subscriptionName)
+ clusterName := args[0]
+ dbName := strings.TrimSpace(dbName)
+
+ if len(dbName) == 0 {
+ var err error
+ dbName, err = logical.GetApplicationDatabaseName(cmd.Context(), clusterName)
+ if err != nil {
+ return err
+ }
+ }
+ if len(dbName) == 0 {
+ return fmt.Errorf(
+ "the name of the database was not specified and there is no available application database")
+ }
+
+ sqlCommand := fmt.Sprintf(
+ "DROP SUBSCRIPTION %s",
+ pgx.Identifier{subscriptionName}.Sanitize(),
+ )
+ fmt.Println(sqlCommand)
+ if dryRun {
+ return nil
+ }
+
+ return logical.RunSQL(cmd.Context(), clusterName, dbName, sqlCommand)
+ },
+ }
+
+ subscriptionDropCmd.Flags().StringVar(
+ &subscriptionName,
+ "subscription",
+ "",
+ "The name of the subscription to be dropped (required)",
+ )
+ _ = subscriptionDropCmd.MarkFlagRequired("subscription")
+
+ subscriptionDropCmd.Flags().StringVar(
+ &dbName,
+ "dbname",
+ "",
+	"The database in which the command should drop the subscription. Defaults to the application database if available",
+ )
+
+ subscriptionDropCmd.Flags().BoolVar(
+ &dryRun,
+ "dry-run",
+ false,
+ "If specified, the subscription deletion commands are shown but not executed",
+ )
+
+ return subscriptionDropCmd
+}
diff --git a/internal/cmd/plugin/logical/subscription/drop/doc.go b/internal/cmd/plugin/logical/subscription/drop/doc.go
new file mode 100644
index 0000000000..a5af8cbcef
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/drop/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package drop contains the implementation of the kubectl cnpg subscription drop command
+package drop
diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go
new file mode 100644
index 0000000000..a73fe0ff09
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/syncsequences/cmd.go
@@ -0,0 +1,130 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syncsequences
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+// NewCmd initializes the subscription sync-sequences command
+func NewCmd() *cobra.Command {
+ var subscriptionName string
+ var dbName string
+ var dryRun bool
+ var offset int
+
+ syncSequencesCmd := &cobra.Command{
+ Use: "sync-sequences cluster_name",
+ Short: "synchronize the sequences from the source database",
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ RunE: func(cmd *cobra.Command, args []string) error {
+ clusterName := args[0]
+ subscriptionName := strings.TrimSpace(subscriptionName)
+ dbName := strings.TrimSpace(dbName)
+
+ var cluster apiv1.Cluster
+ err := plugin.Client.Get(
+ cmd.Context(),
+ client.ObjectKey{
+ Namespace: plugin.Namespace,
+ Name: clusterName,
+ },
+ &cluster,
+ )
+ if err != nil {
+ return fmt.Errorf("cluster %s not found in namespace %s", clusterName, plugin.Namespace)
+ }
+
+ if len(dbName) == 0 {
+ dbName = cluster.GetApplicationDatabaseName()
+ }
+ if len(dbName) == 0 {
+ return fmt.Errorf(
+ "the name of the database was not specified and there is no available application database")
+ }
+
+ connectionString, err := logical.GetSubscriptionConnInfo(cmd.Context(), clusterName, dbName, subscriptionName)
+ if err != nil {
+ return fmt.Errorf(
+ "while getting connection string from subscription: %w", err)
+ }
+ if len(connectionString) == 0 {
+ return fmt.Errorf(
+ "subscription %s was not found", subscriptionName)
+ }
+
+ sourceStatus, err := GetSequenceStatus(cmd.Context(), clusterName, connectionString)
+ if err != nil {
+ return fmt.Errorf("while getting sequences status from the source database: %w", err)
+ }
+
+ destinationStatus, err := GetSequenceStatus(cmd.Context(), clusterName, dbName)
+ if err != nil {
+ return fmt.Errorf("while getting sequences status from the destination database: %w", err)
+ }
+
+ script := CreateSyncScript(sourceStatus, destinationStatus, offset)
+ fmt.Println(script)
+ if dryRun {
+ return nil
+ }
+
+ return logical.RunSQL(cmd.Context(), clusterName, dbName, script)
+ },
+ }
+
+ syncSequencesCmd.Flags().StringVar(
+ &subscriptionName,
+ "subscription",
+ "",
+ "The name of the subscription on which to refresh sequences (required)",
+ )
+ _ = syncSequencesCmd.MarkFlagRequired("subscription")
+
+ syncSequencesCmd.Flags().StringVar(
+ &dbName,
+ "dbname",
+ "",
+ "The name of the database where the subscription is present and sequences need to be updated. "+
+ "Defaults to the application database, if available",
+ )
+ syncSequencesCmd.Flags().BoolVar(
+ &dryRun,
+ "dry-run",
+ false,
+	"If specified, the sequence update commands are shown but not executed",
+ )
+ syncSequencesCmd.Flags().IntVar(
+ &offset,
+ "offset",
+ 0,
+ "The number to add to every sequence number before being updated",
+ )
+
+ return syncSequencesCmd
+}
diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/doc.go b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go
new file mode 100644
index 0000000000..4abc977de1
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/syncsequences/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package syncsequences contains the implementation of the
+// kubectl cnpg subscription sync-sequences command
+package syncsequences
diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/get.go b/internal/cmd/plugin/logical/subscription/syncsequences/get.go
new file mode 100644
index 0000000000..f4fb97d6c1
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/syncsequences/get.go
@@ -0,0 +1,87 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package syncsequences contains the implementation of the
+// kubectl cnpg subscription sync-sequences command
+package syncsequences
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/jackc/pgx/v5"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin/logical"
+)
+
+const sqlGetSequences = `
+WITH seqs AS (
+ SELECT
+ sequencename AS sq_name,
+ schemaname AS sq_namespace,
+ last_value AS sq_value,
+ CURRENT_TIMESTAMP AS ts
+ FROM pg_catalog.pg_sequences s
+)
+SELECT pg_catalog.json_agg(seqs) FROM seqs
+`
+
+// SequenceStatus represent the status of a sequence in a certain moment
+type SequenceStatus struct {
+ // The name of the sequence
+ Name string `json:"sq_name"`
+
+ // The namespace where the sequence is defined
+ Namespace string `json:"sq_namespace"`
+
+ // The last value emitted from the sequence
+ Value *int `json:"sq_value"`
+}
+
+// QualifiedName gets the qualified name of this sequence
+func (status *SequenceStatus) QualifiedName() string {
+ return fmt.Sprintf(
+ "%s.%s",
+ pgx.Identifier{status.Namespace}.Sanitize(),
+ pgx.Identifier{status.Name}.Sanitize(),
+ )
+}
+
+// SequenceMap is a map between a qualified sequence name
+// and its current value
+type SequenceMap map[string]*int
+
+// GetSequenceStatus gets the status of the sequences while being connected to
+// a pod of a cluster to the specified connection string
+func GetSequenceStatus(ctx context.Context, clusterName string, connectionString string) (SequenceMap, error) {
+ output, err := logical.RunSQLWithOutput(ctx, clusterName, connectionString, sqlGetSequences)
+ if err != nil {
+ return nil, fmt.Errorf("while executing query: %w", err)
+ }
+
+ var records []SequenceStatus
+ if err := json.Unmarshal(output, &records); err != nil {
+ return nil, fmt.Errorf("while decoding JSON output: %w", err)
+ }
+
+ result := make(SequenceMap)
+ for i := range records {
+ result[records[i].QualifiedName()] = records[i].Value
+ }
+
+ return result, nil
+}
diff --git a/internal/cmd/plugin/logical/subscription/syncsequences/update.go b/internal/cmd/plugin/logical/subscription/syncsequences/update.go
new file mode 100644
index 0000000000..da6a7315d0
--- /dev/null
+++ b/internal/cmd/plugin/logical/subscription/syncsequences/update.go
@@ -0,0 +1,53 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package syncsequences
+
+import (
+ "fmt"
+
+ "github.com/lib/pq"
+)
+
+// CreateSyncScript creates a SQL script to synchronize the sequences
+// in the destination database with the status of the source database
+func CreateSyncScript(source, destination SequenceMap, offset int) string {
+ script := ""
+
+ for name := range destination {
+ targetValue, ok := source[name]
+ if !ok {
+ // This sequence is not available in the source database,
+ // there's no need to update it
+ continue
+ }
+
+ sqlTargetValue := "NULL"
+ if targetValue != nil {
+ sqlTargetValue = fmt.Sprintf("%d", *targetValue)
+ if offset != 0 {
+ sqlTargetValue = fmt.Sprintf("%s + %d", sqlTargetValue, offset)
+ }
+ }
+
+ script += fmt.Sprintf(
+ "SELECT setval(%s, %v);\n",
+ pq.QuoteLiteral(name),
+ sqlTargetValue)
+ }
+
+ return script
+}
diff --git a/internal/cmd/plugin/logs/cluster.go b/internal/cmd/plugin/logs/cluster.go
index 66d8ce367c..0249ab9357 100644
--- a/internal/cmd/plugin/logs/cluster.go
+++ b/internal/cmd/plugin/logs/cluster.go
@@ -29,7 +29,10 @@ func clusterCmd() *cobra.Command {
Use: "cluster ",
Short: "Logs for cluster's pods",
Long: "Collects the logs for all pods in a cluster into a single stream or outputFile",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
cl.clusterName = args[0]
cl.namespace = plugin.Namespace
diff --git a/internal/cmd/plugin/logs/cluster_test.go b/internal/cmd/plugin/logs/cluster_test.go
index c5d302777b..7b2d20bfba 100644
--- a/internal/cmd/plugin/logs/cluster_test.go
+++ b/internal/cmd/plugin/logs/cluster_test.go
@@ -60,8 +60,8 @@ var _ = Describe("Test the command", func() {
Build()
It("should get the command help", func() {
cmd := clusterCmd()
- err := cmd.Execute()
- Expect(err).To(HaveOccurred())
+ // A panic happens when a tested function returns with os.Exit(0)
+ Expect(func() { _ = cmd.Execute() }).Should(Panic())
})
It("should not fail, with cluster name as argument", func() {
diff --git a/internal/cmd/plugin/maintenance/cmd.go b/internal/cmd/plugin/maintenance/cmd.go
index 1e49c157e9..c9d12ac934 100644
--- a/internal/cmd/plugin/maintenance/cmd.go
+++ b/internal/cmd/plugin/maintenance/cmd.go
@@ -20,6 +20,8 @@ import (
"fmt"
"github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
)
// NewCmd creates the new 'maintenance' command
@@ -38,7 +40,10 @@ func NewCmd() *cobra.Command {
Short: "Sets maintenance mode",
Long: "This command will set maintenance mode on a single cluster or on all clusters " +
"in the current namespace if not specified differently through flags",
- Args: cobra.MaximumNArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
var clusterName string
if len(args) > 0 {
@@ -56,7 +61,10 @@ func NewCmd() *cobra.Command {
Short: "Removes maintenance mode",
Long: "This command will unset maintenance mode on a single cluster or on all clusters " +
"in the current namespace if not specified differently through flags",
- Args: cobra.MaximumNArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
var clusterName string
if len(args) > 0 {
diff --git a/internal/cmd/plugin/pgadmin/cmd.go b/internal/cmd/plugin/pgadmin/cmd.go
index de8310a100..717e95024f 100644
--- a/internal/cmd/plugin/pgadmin/cmd.go
+++ b/internal/cmd/plugin/pgadmin/cmd.go
@@ -75,7 +75,7 @@ func NewCmd() *cobra.Command {
pgadminCmd := &cobra.Command{
Use: "pgadmin4 [name]",
- Short: "Creates a pgadmin deployment.",
+ Short: "Creates a pgadmin deployment",
Args: cobra.MinimumNArgs(1),
Long: `Creates a pgadmin deployment configured to work with a CNPG Cluster.`,
Example: pgadminExample,
diff --git a/internal/cmd/plugin/plugin.go b/internal/cmd/plugin/plugin.go
index 0a8ab31c8c..8bd2e7cc4b 100644
--- a/internal/cmd/plugin/plugin.go
+++ b/internal/cmd/plugin/plugin.go
@@ -21,9 +21,11 @@ import (
"context"
"fmt"
"os"
+ "strings"
"time"
storagesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1"
+ "github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
@@ -138,3 +140,53 @@ func GetPGControlData(
return stdout, nil
}
+
+// completeClusters is mainly used inside the unit tests
+func completeClusters(
+ ctx context.Context,
+ cli client.Client,
+ namespace string,
+ args []string,
+ toComplete string,
+) []string {
+ var clusters apiv1.ClusterList
+
+ // Since all our commands work on one cluster, if we already have one in the list
+ // we just return an empty set of strings
+ if len(args) == 1 {
+ return []string{}
+ }
+
+ // Get the cluster lists object if error we just return empty array string
+ if err := cli.List(ctx, &clusters, client.InNamespace(namespace)); err != nil {
+ // We can't list the clusters, so we cannot provide any completion.
+ // Unfortunately there's no way for us to provide an error message
+ // notifying the user of what is happening.
+ return []string{}
+ }
+
+ clustersNames := make([]string, 0, len(clusters.Items))
+ for _, cluster := range clusters.Items {
+ if len(toComplete) == 0 || strings.HasPrefix(cluster.Name, toComplete) {
+ clustersNames = append(clustersNames, cluster.Name)
+ }
+ }
+
+ return clustersNames
+}
+
+// CompleteClusters will complete the cluster name when necessary getting the list from the current namespace
+func CompleteClusters(ctx context.Context, args []string, toComplete string) []string {
+ return completeClusters(ctx, Client, Namespace, args, toComplete)
+}
+
+// RequiresArguments will show the help message in case no argument has been provided
+func RequiresArguments(nArgs int) cobra.PositionalArgs {
+ return func(cmd *cobra.Command, args []string) error {
+ if len(args) < nArgs {
+ _ = cmd.Help()
+ os.Exit(0)
+ }
+ return nil
+ }
+}
diff --git a/internal/cmd/plugin/plugin_test.go b/internal/cmd/plugin/plugin_test.go
index 8b4ba203aa..b6cfebe70f 100644
--- a/internal/cmd/plugin/plugin_test.go
+++ b/internal/cmd/plugin/plugin_test.go
@@ -17,6 +17,13 @@ limitations under the License.
package plugin
import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -28,3 +35,52 @@ var _ = Describe("create client", func() {
Expect(Client).NotTo(BeNil())
})
})
+
+var _ = Describe("CompleteClusters testing", func() {
+ const namespace = "default"
+ var client k8client.Client
+
+ BeforeEach(func() {
+ cluster1 := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster1",
+ Namespace: namespace,
+ },
+ }
+ cluster2 := &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster2",
+ Namespace: namespace,
+ },
+ }
+
+ client = fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).
+ WithObjects(cluster1, cluster2).Build()
+ })
+
+ It("should return matching cluster names", func(ctx SpecContext) {
+ toComplete := "clu"
+ result := completeClusters(ctx, client, namespace, []string{}, toComplete)
+ Expect(result).To(HaveLen(2))
+ Expect(result).To(ConsistOf("cluster1", "cluster2"))
+ })
+
+ It("should return empty array when no clusters found", func(ctx SpecContext) {
+ toComplete := "nonexistent"
+ result := completeClusters(ctx, client, namespace, []string{}, toComplete)
+ Expect(result).To(BeEmpty())
+ })
+
+ It("should skip clusters with prefix not matching toComplete", func(ctx SpecContext) {
+ toComplete := "nonexistent"
+ result := completeClusters(ctx, client, namespace, []string{}, toComplete)
+ Expect(result).To(BeEmpty())
+ })
+
+ It("should return nothing when a cluster name is already on the arguments list", func(ctx SpecContext) {
+ args := []string{"cluster-example"}
+ toComplete := "cluster-"
+ result := completeClusters(ctx, client, namespace, args, toComplete)
+ Expect(result).To(BeEmpty())
+ })
+})
diff --git a/internal/cmd/plugin/psql/cmd.go b/internal/cmd/plugin/psql/cmd.go
index 0280eb8a8f..3910021464 100644
--- a/internal/cmd/plugin/psql/cmd.go
+++ b/internal/cmd/plugin/psql/cmd.go
@@ -34,25 +34,28 @@ func NewCmd() *cobra.Command {
Use: "psql [cluster] [-- psqlArgs...]",
Short: "Start a psql session targeting a CloudNativePG cluster",
Args: validatePsqlArgs,
- Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.",
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
+ Long: "This command will start an interactive psql session inside a PostgreSQL Pod created by CloudNativePG.",
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
psqlArgs := args[1:]
- psqlOptions := psqlCommandOptions{
- replica: replica,
- namespace: plugin.Namespace,
- allocateTTY: allocateTTY,
- passStdin: passStdin,
- args: psqlArgs,
- name: clusterName,
+ psqlOptions := CommandOptions{
+ Replica: replica,
+ Namespace: plugin.Namespace,
+ AllocateTTY: allocateTTY,
+ PassStdin: passStdin,
+ Args: psqlArgs,
+ Name: clusterName,
}
- psqlCommand, err := newPsqlCommand(cmd.Context(), psqlOptions)
+ psqlCommand, err := NewCommand(cmd.Context(), psqlOptions)
if err != nil {
return err
}
- return psqlCommand.exec()
+ return psqlCommand.Exec()
},
}
diff --git a/internal/cmd/plugin/psql/psql.go b/internal/cmd/plugin/psql/psql.go
index f44c3724e1..1207f50188 100644
--- a/internal/cmd/plugin/psql/psql.go
+++ b/internal/cmd/plugin/psql/psql.go
@@ -36,9 +36,9 @@ const (
kubectlCommand = "kubectl"
)
-// psqlCommand is the launcher of `psql` with `kubectl exec`
-type psqlCommand struct {
- psqlCommandOptions
+// Command is the launcher of `psql` with `kubectl exec`
+type Command struct {
+ CommandOptions
// The list of possible pods where to launch psql
podList []corev1.Pod
@@ -47,37 +47,37 @@ type psqlCommand struct {
kubectlPath string
}
-// psqlCommandOptions are the options required to start psql
-type psqlCommandOptions struct {
- // Require a connection to a replica
- replica bool
+// CommandOptions are the options required to start psql
+type CommandOptions struct {
+ // Require a connection to a Replica
+ Replica bool
- // The cluster name
- name string
+ // The cluster Name
+ Name string
- // The namespace where we're working in
- namespace string
+ // The Namespace where we're working in
+ Namespace string
// Whether we should we allocate a TTY for psql
- allocateTTY bool
+ AllocateTTY bool
// Whether we should we pass stdin to psql
- passStdin bool
+ PassStdin bool
// Arguments to pass to psql
- args []string
+ Args []string
}
-// newPsqlCommand creates a new psql command
-func newPsqlCommand(
+// NewCommand creates a new psql command
+func NewCommand(
ctx context.Context,
- options psqlCommandOptions,
-) (*psqlCommand, error) {
+ options CommandOptions,
+) (*Command, error) {
var pods corev1.PodList
if err := plugin.Client.List(
ctx,
&pods,
- client.MatchingLabels{utils.ClusterLabelName: options.name},
+ client.MatchingLabels{utils.ClusterLabelName: options.Name},
client.InNamespace(plugin.Namespace),
); err != nil {
return nil, err
@@ -88,26 +88,26 @@ func newPsqlCommand(
return nil, fmt.Errorf("while getting kubectl path: %w", err)
}
- return &psqlCommand{
- psqlCommandOptions: options,
- podList: pods.Items,
- kubectlPath: kubectlPath,
+ return &Command{
+ CommandOptions: options,
+ podList: pods.Items,
+ kubectlPath: kubectlPath,
}, nil
}
// getKubectlInvocation gets the kubectl command to be executed
-func (psql *psqlCommand) getKubectlInvocation() ([]string, error) {
- result := make([]string, 0, 11+len(psql.args))
+func (psql *Command) getKubectlInvocation() ([]string, error) {
+ result := make([]string, 0, 11+len(psql.Args))
result = append(result, "kubectl", "exec")
- if psql.allocateTTY {
+ if psql.AllocateTTY {
result = append(result, "-t")
}
- if psql.passStdin {
+ if psql.PassStdin {
result = append(result, "-i")
}
- if len(psql.namespace) > 0 {
- result = append(result, "-n", psql.namespace)
+ if len(psql.Namespace) > 0 {
+ result = append(result, "-n", psql.Namespace)
}
result = append(result, "-c", specs.PostgresContainerName)
@@ -118,14 +118,14 @@ func (psql *psqlCommand) getKubectlInvocation() ([]string, error) {
result = append(result, podName)
result = append(result, "--", "psql")
- result = append(result, psql.args...)
+ result = append(result, psql.Args...)
return result, nil
}
// getPodName get the first Pod name with the required role
-func (psql *psqlCommand) getPodName() (string, error) {
+func (psql *Command) getPodName() (string, error) {
targetPodRole := specs.ClusterRoleLabelPrimary
- if psql.replica {
+ if psql.Replica {
targetPodRole = specs.ClusterRoleLabelReplica
}
@@ -139,9 +139,9 @@ func (psql *psqlCommand) getPodName() (string, error) {
return "", &ErrMissingPod{role: targetPodRole}
}
-// exec replaces the current process with a `kubectl exec` invocation.
+// Exec replaces the current process with a `kubectl exec` invocation.
// This function won't return
-func (psql *psqlCommand) exec() error {
+func (psql *Command) Exec() error {
kubectlExec, err := psql.getKubectlInvocation()
if err != nil {
return err
@@ -155,6 +155,32 @@ func (psql *psqlCommand) exec() error {
return nil
}
+// Run starts a psql process inside the target pod
+func (psql *Command) Run() error {
+ kubectlExec, err := psql.getKubectlInvocation()
+ if err != nil {
+ return err
+ }
+
+ cmd := exec.Command(psql.kubectlPath, kubectlExec[1:]...) // nolint:gosec
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ return cmd.Run()
+}
+
+// Output starts a psql process inside the target pod
+// and returns its stdout
+func (psql *Command) Output() ([]byte, error) {
+ kubectlExec, err := psql.getKubectlInvocation()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd := exec.Command(psql.kubectlPath, kubectlExec[1:]...) // nolint:gosec
+ cmd.Stderr = os.Stderr
+ return cmd.Output()
+}
+
// ErrMissingPod is raised when we can't find a Pod having the desired role
type ErrMissingPod struct {
role string
diff --git a/internal/cmd/plugin/psql/psql_test.go b/internal/cmd/plugin/psql/psql_test.go
index 428d78772c..2104954188 100644
--- a/internal/cmd/plugin/psql/psql_test.go
+++ b/internal/cmd/plugin/psql/psql_test.go
@@ -35,9 +35,9 @@ var _ = Describe("psql launcher", func() {
}
It("selects the correct Pod when looking for a primary", func() {
- cmd := psqlCommand{
- psqlCommandOptions: psqlCommandOptions{
- replica: false,
+ cmd := Command{
+ CommandOptions: CommandOptions{
+ Replica: false,
},
podList: podList,
}
@@ -45,9 +45,9 @@ var _ = Describe("psql launcher", func() {
})
It("selects the correct Pod when looking for a replica", func() {
- cmd := psqlCommand{
- psqlCommandOptions: psqlCommandOptions{
- replica: true,
+ cmd := Command{
+ CommandOptions: CommandOptions{
+ Replica: true,
},
podList: podList,
}
@@ -61,9 +61,9 @@ var _ = Describe("psql launcher", func() {
fakePod("cluster-example-3", "oboe"),
}
- cmd := psqlCommand{
- psqlCommandOptions: psqlCommandOptions{
- replica: false,
+ cmd := Command{
+ CommandOptions: CommandOptions{
+ Replica: false,
},
podList: fakePodList,
}
@@ -74,12 +74,12 @@ var _ = Describe("psql launcher", func() {
})
It("correctly composes a kubectl exec command line", func() {
- cmd := psqlCommand{
- psqlCommandOptions: psqlCommandOptions{
- replica: true,
- allocateTTY: true,
- passStdin: true,
- namespace: "default",
+ cmd := Command{
+ CommandOptions: CommandOptions{
+ Replica: true,
+ AllocateTTY: true,
+ PassStdin: true,
+ Namespace: "default",
},
podList: podList,
}
@@ -99,11 +99,11 @@ var _ = Describe("psql launcher", func() {
})
It("correctly composes a kubectl exec command line with psql args", func() {
- cmd := psqlCommand{
- psqlCommandOptions: psqlCommandOptions{
- replica: true,
- namespace: "default",
- args: []string{
+ cmd := Command{
+ CommandOptions: CommandOptions{
+ Replica: true,
+ Namespace: "default",
+ Args: []string{
"-c",
"select 1",
},
diff --git a/internal/cmd/plugin/reload/cmd.go b/internal/cmd/plugin/reload/cmd.go
index 2850cbabf6..1f9a6bb1ab 100644
--- a/internal/cmd/plugin/reload/cmd.go
+++ b/internal/cmd/plugin/reload/cmd.go
@@ -20,6 +20,8 @@ import (
"context"
"github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
)
// NewCmd creates the new "reset" command
@@ -28,7 +30,10 @@ func NewCmd() *cobra.Command {
Use: "reload [clusterName]",
Short: `Reload the cluster`,
Long: `Triggers a reconciliation loop for all the cluster's instances, rolling out new configurations if present.`,
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(_ *cobra.Command, args []string) error {
ctx := context.Background()
clusterName := args[0]
diff --git a/internal/cmd/plugin/report/cluster.go b/internal/cmd/plugin/report/cluster.go
index e1796cff24..5e5920db37 100644
--- a/internal/cmd/plugin/report/cluster.go
+++ b/internal/cmd/plugin/report/cluster.go
@@ -31,12 +31,14 @@ func clusterCmd() *cobra.Command {
)
const filePlaceholder = "report_cluster__.zip"
-
cmd := &cobra.Command{
Use: "cluster ",
Short: "Report cluster resources, pods, events, logs (opt-in)",
Long: "Collects combined information on the cluster in a Zip file",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
clusterName := args[0]
now := time.Now().UTC()
diff --git a/internal/cmd/plugin/snapshot/cmd.go b/internal/cmd/plugin/snapshot/cmd.go
index 3bb6c04ec7..8abd4a0752 100644
--- a/internal/cmd/plugin/snapshot/cmd.go
+++ b/internal/cmd/plugin/snapshot/cmd.go
@@ -21,6 +21,8 @@ import (
"fmt"
"github.com/spf13/cobra"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cmd/plugin"
)
// NewCmd implements the `snapshot` subcommand
@@ -29,6 +31,9 @@ func NewCmd() *cobra.Command {
Use: "snapshot ",
Short: "command removed",
Long: "Replaced by `kubectl cnpg backup -m volumeSnapshot`",
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(_ *cobra.Command, _ []string) error {
fmt.Println("This command was replaced by `kubectl cnpg backup -m volumeSnapshot`")
fmt.Println("IMPORTANT: if you are using VolumeSnapshots on 1.20, you should upgrade to the latest minor release")
diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go
index f90175d8d7..4ca0f70a63 100644
--- a/internal/cmd/plugin/status/cmd.go
+++ b/internal/cmd/plugin/status/cmd.go
@@ -17,7 +17,8 @@ limitations under the License.
package status
import (
- "context"
+ "fmt"
+ "strings"
"github.com/spf13/cobra"
@@ -29,9 +30,15 @@ func NewCmd() *cobra.Command {
statusCmd := &cobra.Command{
Use: "status [cluster]",
Short: "Get the status of a PostgreSQL cluster",
- Args: cobra.ExactArgs(1),
+ Args: plugin.RequiresArguments(1),
+ ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+ if strings.HasPrefix(toComplete, "-") {
+ fmt.Printf("%+v\n", toComplete)
+ }
+ return plugin.CompleteClusters(cmd.Context(), args, toComplete), cobra.ShellCompDirectiveNoFileComp
+ },
RunE: func(cmd *cobra.Command, args []string) error {
- ctx := context.Background()
+ ctx := cmd.Context()
clusterName := args[0]
verbose, _ := cmd.Flags().GetBool("verbose")
diff --git a/internal/cnpi/plugin/client/backup.go b/internal/cnpi/plugin/client/backup.go
new file mode 100644
index 0000000000..b99ad08438
--- /dev/null
+++ b/internal/cnpi/plugin/client/backup.go
@@ -0,0 +1,167 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/backup"
+ "github.com/cloudnative-pg/cnpg-i/pkg/identity"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+var (
+ // ErrPluginNotLoaded is raised when the plugin that should manage the backup
+	// has not been loaded inside the cluster
+ ErrPluginNotLoaded = errors.New("plugin not loaded")
+
+ // ErrPluginNotSupportBackup is raised when the plugin that should manage the backup
+ // doesn't support the Backup service
+ ErrPluginNotSupportBackup = errors.New("plugin does not support Backup service")
+
+ // ErrPluginNotSupportBackupEndpoint is raised when the plugin that should manage the backup
+ // doesn't support the Backup RPC endpoint
+ ErrPluginNotSupportBackupEndpoint = errors.New("plugin does not support the Backup RPC call")
+)
+
+// BackupResponse is the status of a newly created backup. This is used as a return
+// type for the Backup RPC Call
+type BackupResponse struct {
+ // This field contains a machine-readable ID of the
+ // backup that is being taken
+ BackupID string
+
+ // This field contains a human-readable name of the
+ // backup that is being taken
+ BackupName string
+
+ // This field contains the timestamp of the start
+ // time of the backup
+ StartedAt time.Time
+
+ // This field contains the Unix timestamp of the end
+ // time of the backup
+ StoppedAt time.Time
+
+ // This field contains the current WAL when the backup was started
+ BeginWal string
+
+ // This field contains the current WAL at the end of the backup
+ EndWal string
+
+ // This field contains the current LSN record when the backup was started
+ BeginLsn string
+
+ // This field contains the current LSN record when the backup has finished
+ EndLsn string
+
+ // This field contains the backup label of the backup that have been taken
+ BackupLabelFile []byte
+
+ // This field contains the tablespace map of the backup that have been taken
+ TablespaceMapFile []byte
+
+ // This field contains the ID of the instance that have been backed up
+ InstanceID string
+
+ // This field is set to true for online/hot backups and to false otherwise.
+ Online bool
+}
+
+func (data *data) Backup(
+ ctx context.Context,
+ cluster client.Object,
+ backupObject client.Object,
+ pluginName string,
+ parameters map[string]string,
+) (*BackupResponse, error) {
+ contextLogger := log.FromContext(ctx)
+
+ serializedCluster, err := json.Marshal(cluster)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ cluster.GetObjectKind().GroupVersionKind().Kind,
+ cluster.GetNamespace(), cluster.GetName(),
+ err,
+ )
+ }
+
+ serializedBackup, err := json.Marshal(backupObject)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ backupObject.GetObjectKind().GroupVersionKind().Kind,
+ backupObject.GetNamespace(), backupObject.GetName(),
+ err,
+ )
+ }
+
+ plugin, err := data.getPlugin(pluginName)
+ if err != nil {
+ return nil, err
+ }
+
+ if !slices.Contains(plugin.capabilities, identity.PluginCapability_Service_TYPE_BACKUP_SERVICE) {
+ return nil, ErrPluginNotSupportBackup
+ }
+
+ if !slices.Contains(plugin.backupCapabilities, backup.BackupCapability_RPC_TYPE_BACKUP) {
+ return nil, ErrPluginNotSupportBackupEndpoint
+ }
+
+ contextLogger = contextLogger.WithValues(
+ "pluginName", pluginName,
+ )
+
+ request := backup.BackupRequest{
+ ClusterDefinition: serializedCluster,
+ BackupDefinition: serializedBackup,
+ Parameters: parameters,
+ }
+
+ contextLogger.Trace(
+ "Calling Backup endpoint",
+ "clusterDefinition", request.ClusterDefinition,
+ "parameters", parameters)
+
+ result, err := plugin.backupClient.Backup(ctx, &request)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling Backup, failing")
+ return nil, err
+ }
+
+ return &BackupResponse{
+ BackupID: result.BackupId,
+ BackupName: result.BackupName,
+ StartedAt: time.Unix(result.StartedAt, 0),
+ StoppedAt: time.Unix(result.StoppedAt, 0),
+ BeginWal: result.BeginWal,
+ EndWal: result.EndWal,
+ BeginLsn: result.BeginLsn,
+ EndLsn: result.EndLsn,
+ BackupLabelFile: result.BackupLabelFile,
+ TablespaceMapFile: result.TablespaceMapFile,
+ InstanceID: result.InstanceId,
+ Online: result.Online,
+ }, nil
+}
diff --git a/internal/cnpi/plugin/client/cluster.go b/internal/cnpi/plugin/client/cluster.go
new file mode 100644
index 0000000000..d6749a3e01
--- /dev/null
+++ b/internal/cnpi/plugin/client/cluster.go
@@ -0,0 +1,210 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/operator"
+ jsonpatch "github.com/evanphx/json-patch/v5"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+func (data *data) MutateCluster(ctx context.Context, object client.Object, mutatedObject client.Object) error {
+ contextLogger := log.FromContext(ctx)
+
+ serializedObject, err := json.Marshal(object)
+ if err != nil {
+ return fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ object.GetObjectKind().GroupVersionKind().Kind,
+ object.GetNamespace(), object.GetName(),
+ err,
+ )
+ }
+
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if !slices.Contains(plugin.operatorCapabilities, operator.OperatorCapability_RPC_TYPE_MUTATE_CLUSTER) {
+ continue
+ }
+
+ contextLogger := contextLogger.WithValues(
+ "pluginName", plugin.name,
+ )
+ request := operator.OperatorMutateClusterRequest{
+ Definition: serializedObject,
+ }
+
+ contextLogger.Trace("Calling MutateCluster endpoint", "definition", request.Definition)
+ result, err := plugin.operatorClient.MutateCluster(ctx, &request)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling MutateCluster")
+ return err
+ }
+
+ if len(result.JsonPatch) == 0 {
+ // There's nothing to mutate
+ continue
+ }
+
+ patch, err := jsonpatch.DecodePatch(result.JsonPatch)
+ if err != nil {
+ contextLogger.Error(err, "Error while decoding JSON patch from plugin", "patch", result.JsonPatch)
+ return err
+ }
+
+ mutatedObject, err := patch.Apply(serializedObject)
+ if err != nil {
+ contextLogger.Error(err, "Error while applying JSON patch from plugin", "patch", result.JsonPatch)
+ return err
+ }
+
+ serializedObject = mutatedObject
+ }
+
+ if err := json.Unmarshal(serializedObject, mutatedObject); err != nil {
+ return fmt.Errorf("while deserializing %s %s/%s to JSON: %w",
+ object.GetObjectKind().GroupVersionKind().Kind,
+ object.GetNamespace(), object.GetName(),
+ err,
+ )
+ }
+
+ return nil
+}
+
+func (data *data) ValidateClusterCreate(
+ ctx context.Context,
+ object client.Object,
+) (field.ErrorList, error) {
+ contextLogger := log.FromContext(ctx)
+
+ serializedObject, err := json.Marshal(object)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ object.GetObjectKind().GroupVersionKind().Kind,
+ object.GetNamespace(), object.GetName(),
+ err,
+ )
+ }
+
+ var validationErrors []*operator.ValidationError
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if !slices.Contains(plugin.operatorCapabilities, operator.OperatorCapability_RPC_TYPE_VALIDATE_CLUSTER_CREATE) {
+ continue
+ }
+
+ contextLogger := contextLogger.WithValues(
+ "pluginName", plugin.name,
+ )
+ request := operator.OperatorValidateClusterCreateRequest{
+ Definition: serializedObject,
+ }
+
+ contextLogger.Trace("Calling ValidateClusterCreate endpoint", "definition", request.Definition)
+ result, err := plugin.operatorClient.ValidateClusterCreate(ctx, &request)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling ValidateClusterCreate")
+ return nil, err
+ }
+
+ validationErrors = append(validationErrors, result.ValidationErrors...)
+ }
+
+ return validationErrorsToErrorList(validationErrors), nil
+}
+
+func (data *data) ValidateClusterUpdate(
+ ctx context.Context,
+ oldObject client.Object,
+ newObject client.Object,
+) (field.ErrorList, error) {
+ contextLogger := log.FromContext(ctx)
+
+ serializedOldObject, err := json.Marshal(oldObject)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ oldObject.GetObjectKind().GroupVersionKind().Kind,
+ oldObject.GetNamespace(), oldObject.GetName(),
+ err,
+ )
+ }
+
+ serializedNewObject, err := json.Marshal(newObject)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ newObject.GetObjectKind().GroupVersionKind().Kind,
+ newObject.GetNamespace(), newObject.GetName(),
+ err,
+ )
+ }
+
+ var validationErrors []*operator.ValidationError
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if !slices.Contains(plugin.operatorCapabilities, operator.OperatorCapability_RPC_TYPE_VALIDATE_CLUSTER_CHANGE) {
+ continue
+ }
+
+ contextLogger := contextLogger.WithValues(
+ "pluginName", plugin.name,
+ )
+ request := operator.OperatorValidateClusterChangeRequest{
+ OldCluster: serializedOldObject,
+ NewCluster: serializedNewObject,
+ }
+
+ contextLogger.Trace(
+ "Calling ValidateClusterChange endpoint",
+ "oldCluster", request.OldCluster,
+ "newCluster", request.NewCluster)
+ result, err := plugin.operatorClient.ValidateClusterChange(ctx, &request)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling ValidateClusterChange")
+ return nil, err
+ }
+
+ validationErrors = append(validationErrors, result.ValidationErrors...)
+ }
+
+ return validationErrorsToErrorList(validationErrors), nil
+}
+
+// validationErrorsToErrorList makes up a list of validation errors as required by
+// the Kubernetes API from the GRPC plugin interface types
+func validationErrorsToErrorList(validationErrors []*operator.ValidationError) (result field.ErrorList) {
+ result = make(field.ErrorList, len(validationErrors))
+ for i, validationError := range validationErrors {
+ result[i] = field.Invalid(
+ field.NewPath(validationError.PathComponents[0], validationError.PathComponents[1:]...),
+ validationError.Value,
+ validationError.Message,
+ )
+ }
+
+ return result
+}
diff --git a/internal/cnpi/plugin/client/connection.go b/internal/cnpi/plugin/client/connection.go
new file mode 100644
index 0000000000..2b2be7d7f7
--- /dev/null
+++ b/internal/cnpi/plugin/client/connection.go
@@ -0,0 +1,403 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "path"
+ "slices"
+ "time"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/backup"
+ "github.com/cloudnative-pg/cnpg-i/pkg/identity"
+ "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
+ "github.com/cloudnative-pg/cnpg-i/pkg/operator"
+ "github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
+ "github.com/cloudnative-pg/cnpg-i/pkg/wal"
+ "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/timeout"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+// defaultTimeout is the timeout applied by default to every GRPC call
+const defaultTimeout = 30 * time.Second
+
+type protocol interface {
+ dial(ctx context.Context, path string) (connectionHandler, error)
+}
+
+type connectionHandler interface {
+ grpc.ClientConnInterface
+ io.Closer
+}
+
+type protocolUnix string
+
+func (p protocolUnix) dial(ctx context.Context, path string) (connectionHandler, error) {
+ contextLogger := log.FromContext(ctx)
+ dialPath := fmt.Sprintf("unix://%s", path)
+
+ contextLogger.Debug("Connecting to plugin", "path", dialPath)
+
+ return grpc.Dial(
+ dialPath,
+ grpc.WithTransportCredentials(insecure.NewCredentials()),
+ grpc.WithUnaryInterceptor(
+ timeout.UnaryClientInterceptor(defaultTimeout),
+ ),
+ )
+}
+
+// data represent a new CNPI client collection
+type data struct {
+ pluginPath string
+ protocol protocol
+ plugins []pluginData
+}
+
+func (data *data) getPlugin(pluginName string) (*pluginData, error) {
+ selectedPluginIdx := -1
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if plugin.name == pluginName {
+ selectedPluginIdx = idx
+ break
+ }
+ }
+
+ if selectedPluginIdx == -1 {
+ return nil, ErrPluginNotLoaded
+ }
+
+ return &data.plugins[selectedPluginIdx], nil
+}
+
+type pluginData struct {
+ connection connectionHandler
+ identityClient identity.IdentityClient
+ operatorClient operator.OperatorClient
+ lifecycleClient lifecycle.OperatorLifecycleClient
+ walClient wal.WALClient
+ backupClient backup.BackupClient
+ reconcilerHooksClient reconciler.ReconcilerHooksClient
+
+ name string
+ version string
+ capabilities []identity.PluginCapability_Service_Type
+ operatorCapabilities []operator.OperatorCapability_RPC_Type
+ walCapabilities []wal.WALCapability_RPC_Type
+ lifecycleCapabilities []*lifecycle.OperatorLifecycleCapabilities
+ backupCapabilities []backup.BackupCapability_RPC_Type
+ reconcilerCapabilities []reconciler.ReconcilerHooksCapability_Kind
+}
+
+// NewUnixSocketClient creates a new CNPI client discovering plugins
+// registered in a specific path
+func NewUnixSocketClient(pluginPath string) Client {
+ return &data{
+ pluginPath: pluginPath,
+ protocol: protocolUnix(""),
+ }
+}
+
+func (data *data) Load(ctx context.Context, name string) error {
+ pluginData, err := data.loadPlugin(ctx, name)
+ if err != nil {
+ return err
+ }
+
+ data.plugins = append(data.plugins, pluginData)
+ return nil
+}
+
+func (data *data) MetadataList() []Metadata {
+ result := make([]Metadata, len(data.plugins))
+ for i := range data.plugins {
+ result[i] = data.plugins[i].Metadata()
+ }
+
+ return result
+}
+
+func (data *data) loadPlugin(ctx context.Context, name string) (pluginData, error) {
+ var connection connectionHandler
+ var err error
+
+ defer func() {
+ if err != nil && connection != nil {
+ _ = connection.Close()
+ }
+ }()
+
+ contextLogger := log.FromContext(ctx).WithValues("pluginName", name)
+ ctx = log.IntoContext(ctx, contextLogger)
+
+ if connection, err = data.protocol.dial(
+ ctx,
+ path.Join(data.pluginPath, name),
+ ); err != nil {
+ contextLogger.Error(err, "Error while connecting to plugin")
+ return pluginData{}, err
+ }
+
+ var result pluginData
+ result, err = newPluginDataFromConnection(ctx, connection)
+ if err != nil {
+ return pluginData{}, err
+ }
+
+ // Load the list of services implemented by the plugin
+ if err = result.loadPluginCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+
+ // If the plugin implements the Operator service, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_OPERATOR_SERVICE) {
+ if err = result.loadOperatorCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+ }
+
+ // If the plugin implements the lifecycle service, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_LIFECYCLE_SERVICE) {
+ if err = result.loadLifecycleCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+ }
+
+ // If the plugin implements the WAL service, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_WAL_SERVICE) {
+ if err = result.loadWALCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+ }
+
+ // If the plugin implements the backup service, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_BACKUP_SERVICE) {
+ if err = result.loadBackupCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+ }
+
+ // If the plugin implements the reconciler hooks, load its
+ // capabilities
+ if slices.Contains(result.capabilities, identity.PluginCapability_Service_TYPE_RECONCILER_HOOKS) {
+ if err = result.loadReconcilerHooksCapabilities(ctx); err != nil {
+ return pluginData{}, err
+ }
+ }
+
+ return result, nil
+}
+
+func (data *data) Close(ctx context.Context) {
+ contextLogger := log.FromContext(ctx)
+ for i := range data.plugins {
+ plugin := &data.plugins[i]
+ contextLogger := contextLogger.WithValues("pluginName", plugin.name)
+
+ if err := plugin.connection.Close(); err != nil {
+ contextLogger.Error(err, "while closing plugin connection")
+ }
+ }
+
+ data.plugins = nil
+}
+
+func newPluginDataFromConnection(ctx context.Context, connection connectionHandler) (pluginData, error) {
+ var err error
+
+ identityClient := identity.NewIdentityClient(connection)
+
+ var pluginInfoResponse *identity.GetPluginMetadataResponse
+
+ if pluginInfoResponse, err = identityClient.GetPluginMetadata(
+ ctx,
+ &identity.GetPluginMetadataRequest{},
+ ); err != nil {
+ return pluginData{}, fmt.Errorf("while querying plugin identity: %w", err)
+ }
+
+ result := pluginData{}
+ result.connection = connection
+ result.name = pluginInfoResponse.Name
+ result.version = pluginInfoResponse.Version
+ result.identityClient = identity.NewIdentityClient(connection)
+ result.operatorClient = operator.NewOperatorClient(connection)
+ result.lifecycleClient = lifecycle.NewOperatorLifecycleClient(connection)
+ result.walClient = wal.NewWALClient(connection)
+ result.backupClient = backup.NewBackupClient(connection)
+ result.reconcilerHooksClient = reconciler.NewReconcilerHooksClient(connection)
+
+ return result, err
+}
+
+func (pluginData *pluginData) loadPluginCapabilities(ctx context.Context) error {
+ var pluginCapabilitiesResponse *identity.GetPluginCapabilitiesResponse
+ var err error
+
+ if pluginCapabilitiesResponse, err = pluginData.identityClient.GetPluginCapabilities(
+ ctx,
+ &identity.GetPluginCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin capabilities: %w", err)
+ }
+
+ pluginData.capabilities = make([]identity.PluginCapability_Service_Type, len(pluginCapabilitiesResponse.Capabilities))
+ for i := range pluginData.capabilities {
+ pluginData.capabilities[i] = pluginCapabilitiesResponse.Capabilities[i].GetService().Type
+ }
+
+ return nil
+}
+
+func (pluginData *pluginData) loadOperatorCapabilities(ctx context.Context) error {
+ var operatorCapabilitiesResponse *operator.OperatorCapabilitiesResult
+ var err error
+
+ if operatorCapabilitiesResponse, err = pluginData.operatorClient.GetCapabilities(
+ ctx,
+ &operator.OperatorCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin operator capabilities: %w", err)
+ }
+
+ pluginData.operatorCapabilities = make(
+ []operator.OperatorCapability_RPC_Type,
+ len(operatorCapabilitiesResponse.Capabilities))
+ for i := range pluginData.operatorCapabilities {
+ pluginData.operatorCapabilities[i] = operatorCapabilitiesResponse.Capabilities[i].GetRpc().Type
+ }
+
+ return nil
+}
+
+func (pluginData *pluginData) loadLifecycleCapabilities(ctx context.Context) error {
+ var lifecycleCapabilitiesResponse *lifecycle.OperatorLifecycleCapabilitiesResponse
+ var err error
+ if lifecycleCapabilitiesResponse, err = pluginData.lifecycleClient.GetCapabilities(
+ ctx,
+ &lifecycle.OperatorLifecycleCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin lifecycle capabilities: %w", err)
+ }
+
+ pluginData.lifecycleCapabilities = lifecycleCapabilitiesResponse.LifecycleCapabilities
+ return nil
+}
+
+func (pluginData *pluginData) loadReconcilerHooksCapabilities(ctx context.Context) error {
+ var reconcilerHooksCapabilitiesResult *reconciler.ReconcilerHooksCapabilitiesResult
+ var err error
+ if reconcilerHooksCapabilitiesResult, err = pluginData.reconcilerHooksClient.GetCapabilities(
+ ctx,
+ &reconciler.ReconcilerHooksCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin lifecycle capabilities: %w", err)
+ }
+
+ pluginData.reconcilerCapabilities = make(
+ []reconciler.ReconcilerHooksCapability_Kind,
+ len(reconcilerHooksCapabilitiesResult.ReconcilerCapabilities))
+
+ for i := range pluginData.reconcilerCapabilities {
+ pluginData.reconcilerCapabilities[i] = reconcilerHooksCapabilitiesResult.ReconcilerCapabilities[i].Kind
+ }
+ return nil
+}
+
+func (pluginData *pluginData) loadWALCapabilities(ctx context.Context) error {
+ var walCapabilitiesResponse *wal.WALCapabilitiesResult
+ var err error
+
+ if walCapabilitiesResponse, err = pluginData.walClient.GetCapabilities(
+ ctx,
+ &wal.WALCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin operator capabilities: %w", err)
+ }
+
+ pluginData.walCapabilities = make(
+ []wal.WALCapability_RPC_Type,
+ len(walCapabilitiesResponse.Capabilities))
+ for i := range pluginData.walCapabilities {
+ pluginData.walCapabilities[i] = walCapabilitiesResponse.Capabilities[i].GetRpc().Type
+ }
+
+ return nil
+}
+
+func (pluginData *pluginData) loadBackupCapabilities(ctx context.Context) error {
+ var backupCapabilitiesResponse *backup.BackupCapabilitiesResult
+ var err error
+
+ if backupCapabilitiesResponse, err = pluginData.backupClient.GetCapabilities(
+ ctx,
+ &backup.BackupCapabilitiesRequest{},
+ ); err != nil {
+ return fmt.Errorf("while querying plugin operator capabilities: %w", err)
+ }
+
+ pluginData.backupCapabilities = make(
+ []backup.BackupCapability_RPC_Type,
+ len(backupCapabilitiesResponse.Capabilities))
+ for i := range pluginData.backupCapabilities {
+ pluginData.backupCapabilities[i] = backupCapabilitiesResponse.Capabilities[i].GetRpc().Type
+ }
+
+ return nil
+}
+
+// Metadata extracts the plugin metadata reading from
+// the internal metadata
+func (pluginData *pluginData) Metadata() Metadata {
+ result := Metadata{
+ Name: pluginData.name,
+ Version: pluginData.version,
+ Capabilities: make([]string, len(pluginData.capabilities)),
+ OperatorCapabilities: make([]string, len(pluginData.operatorCapabilities)),
+ WALCapabilities: make([]string, len(pluginData.walCapabilities)),
+ BackupCapabilities: make([]string, len(pluginData.backupCapabilities)),
+ }
+
+ for i := range pluginData.capabilities {
+ result.Capabilities[i] = pluginData.capabilities[i].String()
+ }
+
+ for i := range pluginData.operatorCapabilities {
+ result.OperatorCapabilities[i] = pluginData.operatorCapabilities[i].String()
+ }
+
+ for i := range pluginData.walCapabilities {
+ result.WALCapabilities[i] = pluginData.walCapabilities[i].String()
+ }
+
+ for i := range pluginData.backupCapabilities {
+ result.BackupCapabilities[i] = pluginData.backupCapabilities[i].String()
+ }
+
+ return result
+}
diff --git a/internal/cnpi/plugin/client/contracts.go b/internal/cnpi/plugin/client/contracts.go
new file mode 100644
index 0000000000..580ddf187d
--- /dev/null
+++ b/internal/cnpi/plugin/client/contracts.go
@@ -0,0 +1,162 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/util/validation/field"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
+)
+
+// Metadata expose the metadata as discovered
+// from a plugin
+type Metadata struct {
+ Name string
+ Version string
+ Capabilities []string
+ OperatorCapabilities []string
+ WALCapabilities []string
+ BackupCapabilities []string
+}
+
+// Loader describes a struct capable of generating a plugin Client
+type Loader interface {
+ // LoadPluginClient creates a new plugin client, loading the plugins that are required
+ // by this cluster
+ LoadPluginClient(ctx context.Context) (Client, error)
+}
+
+// Client describes a set of behaviour needed to properly handle all the plugin client expected features
+type Client interface {
+ Connection
+ ClusterCapabilities
+ ClusterReconcilerHooks
+ LifecycleCapabilities
+ WalCapabilities
+ BackupCapabilities
+}
+
+// Connection describes a set of behaviour needed to properly handle the plugin connections
+type Connection interface {
+ // Load connect to the plugin with the specified name
+ Load(ctx context.Context, name string) error
+
+ // Close closes the connection to every loaded plugin
+ Close(ctx context.Context)
+
+ // MetadataList exposes the metadata of the loaded plugins
+ MetadataList() []Metadata
+}
+
+// ClusterCapabilities describes a set of behaviour needed to implement the Cluster capabilities
+type ClusterCapabilities interface {
+	// MutateCluster calls the loaded plugins to enhance
+	// a cluster definition
+ MutateCluster(
+ ctx context.Context,
+ object client.Object,
+ mutatedObject client.Object,
+ ) error
+
+	// ValidateClusterCreate calls all the loaded plugins to check if a cluster definition
+	// is correct
+ ValidateClusterCreate(
+ ctx context.Context,
+ object client.Object,
+ ) (field.ErrorList, error)
+
+	// ValidateClusterUpdate calls all the loaded plugins to check if a cluster can
+	// be changed from a value to another
+ ValidateClusterUpdate(
+ ctx context.Context,
+ oldObject client.Object,
+ newObject client.Object,
+ ) (field.ErrorList, error)
+}
+
+// ReconcilerHookResult is the result of a reconciliation loop
+type ReconcilerHookResult struct {
+ Result ctrl.Result
+ Err error
+ StopReconciliation bool
+}
+
+// ClusterReconcilerHooks describes a set of behavior needed to enhance
+// the logic of the Cluster reconciliation loop
+type ClusterReconcilerHooks interface {
+ // PreReconcile is executed after we get the resources and update the status
+ PreReconcile(
+ ctx context.Context,
+ cluster client.Object,
+ object client.Object,
+ ) ReconcilerHookResult
+
+ // PostReconcile is executed at the end of the reconciliation loop
+ PostReconcile(
+ ctx context.Context,
+ cluster client.Object,
+ object client.Object,
+ ) ReconcilerHookResult
+}
+
+// LifecycleCapabilities describes a set of behaviour needed to implement the Lifecycle capabilities
+type LifecycleCapabilities interface {
+ // LifecycleHook notifies the registered plugins of a given event for a given object
+ LifecycleHook(
+ ctx context.Context,
+ operationVerb plugin.OperationVerb,
+ cluster client.Object,
+ object client.Object,
+ ) (client.Object, error)
+}
+
+// WalCapabilities describes a set of behavior needed to archive and recover WALs
+type WalCapabilities interface {
+ // ArchiveWAL calls the loaded plugins to archive a WAL file.
+ // This call is a no-op if there's no plugin implementing WAL archiving
+ ArchiveWAL(
+ ctx context.Context,
+ cluster client.Object,
+ sourceFileName string,
+ ) error
+
+	// RestoreWAL calls the loaded plugins to restore a WAL file.
+	// This call is a no-op if there's no plugin implementing WAL restoring
+ RestoreWAL(
+ ctx context.Context,
+ cluster client.Object,
+ sourceWALName string,
+ destinationFileName string,
+ ) error
+}
+
+// BackupCapabilities describes a set of behaviour needed to backup
+// a PostgreSQL cluster
+type BackupCapabilities interface {
+ // Backup takes a backup via a cnpg-i plugin
+ Backup(
+ ctx context.Context,
+ cluster client.Object,
+ backupObject client.Object,
+ pluginName string,
+ parameters map[string]string,
+ ) (*BackupResponse, error)
+}
diff --git a/internal/cnpi/plugin/client/doc.go b/internal/cnpi/plugin/client/doc.go
new file mode 100644
index 0000000000..1cb0e5ee6d
--- /dev/null
+++ b/internal/cnpi/plugin/client/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package client contains a set of helper structures for CNPG to use the
+// plugins exposing the CNPI interface
+package client
diff --git a/internal/cnpi/plugin/client/lifecycle.go b/internal/cnpi/plugin/client/lifecycle.go
new file mode 100644
index 0000000000..6a564234ba
--- /dev/null
+++ b/internal/cnpi/plugin/client/lifecycle.go
@@ -0,0 +1,158 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "slices"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
+ jsonpatch "github.com/evanphx/json-patch/v5"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/json"
+ "k8s.io/client-go/kubernetes/scheme"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+var runtimeScheme = runtime.NewScheme()
+
+func init() {
+ _ = scheme.AddToScheme(runtimeScheme)
+}
+
+func (data *data) LifecycleHook(
+ ctx context.Context,
+ operationType plugin.OperationVerb,
+ cluster client.Object,
+ object client.Object,
+) (client.Object, error) {
+ contextLogger := log.FromContext(ctx).WithName("lifecycle_hook")
+
+ typedOperationType, err := operationType.ToOperationType_Type()
+ if err != nil {
+ return nil, err
+ }
+ gvk := object.GetObjectKind().GroupVersionKind()
+ if gvk.Kind == "" || gvk.Version == "" {
+ gvk, err = apiutil.GVKForObject(object, runtimeScheme)
+ if err != nil {
+ contextLogger.Trace("skipping unknown object", "object", object)
+ // Skip unknown object
+ return nil, nil
+ }
+ }
+ object.GetObjectKind().SetGroupVersionKind(gvk)
+
+ var invokablePlugin []pluginData
+ for _, plg := range data.plugins {
+ for _, capability := range plg.lifecycleCapabilities {
+ if capability.Group != gvk.Group || capability.Kind != gvk.Kind {
+ continue
+ }
+
+ contained := slices.ContainsFunc(capability.OperationTypes, func(ot *lifecycle.OperatorOperationType) bool {
+ return ot.GetType() == typedOperationType
+ })
+
+ if !contained {
+ continue
+ }
+
+ invokablePlugin = append(invokablePlugin, plg)
+ }
+ }
+
+ if len(invokablePlugin) == 0 {
+ return object, nil
+ }
+
+ serializedCluster, err := json.Marshal(cluster)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ cluster.GetObjectKind().GroupVersionKind().Kind,
+ cluster.GetNamespace(), cluster.GetName(),
+ err,
+ )
+ }
+
+ serializedObject, err := json.Marshal(object)
+ if err != nil {
+ return nil, fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ object.GetObjectKind().GroupVersionKind().Kind,
+ object.GetNamespace(), object.GetName(),
+ err,
+ )
+ }
+
+ serializedObjectOrig := make([]byte, len(serializedObject))
+ copy(serializedObjectOrig, serializedObject)
+ for _, plg := range invokablePlugin {
+ req := &lifecycle.OperatorLifecycleRequest{
+ OperationType: &lifecycle.OperatorOperationType{
+ Type: typedOperationType,
+ },
+ ClusterDefinition: serializedCluster,
+ ObjectDefinition: serializedObject,
+ }
+ result, err := plg.lifecycleClient.LifecycleHook(ctx, req)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling LifecycleHook")
+ return nil, err
+ }
+
+ if result == nil || len(result.JsonPatch) == 0 {
+ // There's nothing to mutate
+ continue
+ }
+
+ patch, err := jsonpatch.DecodePatch(result.JsonPatch)
+ if err != nil {
+ contextLogger.Error(err, "Error while decoding JSON patch from plugin", "patch", result.JsonPatch)
+ return nil, err
+ }
+
+ responseObj, err := patch.Apply(serializedObject)
+ if err != nil {
+ contextLogger.Error(err, "Error while applying JSON patch from plugin", "patch", result.JsonPatch)
+ return nil, err
+ }
+
+ serializedObject = responseObj
+ }
+
+ if reflect.DeepEqual(serializedObject, serializedObjectOrig) {
+ return object, nil
+ }
+
+ decoder := scheme.Codecs.UniversalDeserializer()
+ mutatedObject, _, err := decoder.Decode(serializedObject, nil, nil)
+ if err != nil {
+ return nil, fmt.Errorf("while deserializing %s %s/%s to JSON: %w",
+ object.GetObjectKind().GroupVersionKind().Kind,
+ object.GetNamespace(), object.GetName(),
+ err,
+ )
+ }
+
+ return mutatedObject.(client.Object), nil
+}
diff --git a/internal/cnpi/plugin/client/lifecycle_test.go b/internal/cnpi/plugin/client/lifecycle_test.go
new file mode 100644
index 0000000000..3631530652
--- /dev/null
+++ b/internal/cnpi/plugin/client/lifecycle_test.go
@@ -0,0 +1,241 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
+ "google.golang.org/grpc"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/json"
+ decoder "k8s.io/apimachinery/pkg/util/yaml"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+type fakeLifecycleClient struct {
+ capabilitiesError error
+ lifecycleHookError error
+ labelInjector map[string]string
+ capabilities []*lifecycle.OperatorLifecycleCapabilities
+}
+
+func newFakeLifecycleClient(
+ capabilities []*lifecycle.OperatorLifecycleCapabilities,
+ labelInjector map[string]string,
+ capabilitiesError error,
+ lifecycleHookError error,
+) *fakeLifecycleClient {
+ return &fakeLifecycleClient{
+ capabilities: capabilities,
+ labelInjector: labelInjector,
+ capabilitiesError: capabilitiesError,
+ lifecycleHookError: lifecycleHookError,
+ }
+}
+
+func (f *fakeLifecycleClient) GetCapabilities(
+ _ context.Context,
+ _ *lifecycle.OperatorLifecycleCapabilitiesRequest,
+ _ ...grpc.CallOption,
+) (*lifecycle.OperatorLifecycleCapabilitiesResponse, error) {
+ return &lifecycle.OperatorLifecycleCapabilitiesResponse{LifecycleCapabilities: f.capabilities}, f.capabilitiesError
+}
+
+func (f *fakeLifecycleClient) LifecycleHook(
+	_ context.Context,
+	in *lifecycle.OperatorLifecycleRequest,
+	_ ...grpc.CallOption,
+) (*lifecycle.OperatorLifecycleResponse, error) {
+	defRes := &lifecycle.OperatorLifecycleResponse{
+		JsonPatch: nil,
+	}
+
+	if f.lifecycleHookError != nil {
+		return defRes, f.lifecycleHookError
+	}
+
+	var cluster appsv1.Deployment
+	if err := tryDecode(in.ClusterDefinition, &cluster); err != nil {
+		return nil, fmt.Errorf("invalid cluster supplied: %w", err)
+	}
+
+	var instance corev1.Pod
+	if err := tryDecode(in.ObjectDefinition, &instance); err != nil {
+		return defRes, nil
+	}
+	// matches tracks whether any declared capability targets this object's kind
+	matches := false
+	for _, capability := range f.capabilities {
+		if capability.Kind == instance.Kind {
+			matches = true
+		}
+	}
+	// No capability for this kind: leave the object untouched
+	if !matches {
+		return defRes, nil
+	}
+	switch in.OperationType.Type {
+	case lifecycle.OperatorOperationType_TYPE_CREATE:
+		originalInstance := instance.DeepCopy()
+		if instance.Labels == nil {
+			instance.Labels = map[string]string{}
+		}
+		for key, value := range f.labelInjector {
+			instance.Labels[key] = value
+		}
+
+		res, err := createJSONPatchForLabels(originalInstance, &instance)
+		return &lifecycle.OperatorLifecycleResponse{JsonPatch: res}, err
+	case lifecycle.OperatorOperationType_TYPE_DELETE:
+		originalInstance := instance.DeepCopy()
+		for key := range f.labelInjector {
+			delete(instance.Labels, key)
+		}
+		res, err := createJSONPatchForLabels(originalInstance, &instance)
+		return &lifecycle.OperatorLifecycleResponse{JsonPatch: res}, err
+	default:
+		return defRes, nil
+	}
+}
+
+func tryDecode[T k8client.Object](rawObj []byte, cast T) error {
+ dec := decoder.NewYAMLOrJSONDecoder(bytes.NewReader(rawObj), 1000)
+
+ return dec.Decode(cast)
+}
+
+func (f *fakeLifecycleClient) set(d *pluginData) {
+ if d == nil {
+ return
+ }
+
+ d.lifecycleClient = f
+ d.lifecycleCapabilities = f.capabilities
+}
+
+var _ = Describe("LifecycleHook", func() {
+ var (
+ d *data
+ clusterObj k8client.Object
+ capabilities = []*lifecycle.OperatorLifecycleCapabilities{
+ {
+ Group: "",
+ Kind: "Pod",
+ OperationTypes: []*lifecycle.OperatorOperationType{
+ {
+ Type: lifecycle.OperatorOperationType_TYPE_CREATE,
+ },
+ {
+ Type: lifecycle.OperatorOperationType_TYPE_DELETE,
+ },
+ },
+ },
+ }
+ )
+
+ BeforeEach(func() {
+ d = &data{
+ plugins: []pluginData{
+ {
+ name: "test",
+ },
+ },
+ }
+
+ clusterObj = &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{},
+ }
+ })
+
+ It("should correctly inject the values in the passed object", func(ctx SpecContext) {
+ mapInjector := map[string]string{"test": "test"}
+ f := newFakeLifecycleClient(capabilities, mapInjector, nil, nil)
+ f.set(&d.plugins[0])
+
+ pod := &corev1.Pod{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Pod",
+ },
+ ObjectMeta: metav1.ObjectMeta{},
+ }
+ obj, err := d.LifecycleHook(ctx, plugin.OperationVerbCreate, clusterObj, pod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(obj).ToNot(BeNil())
+ podModified, ok := obj.(*corev1.Pod)
+ Expect(ok).To(BeTrue())
+ Expect(podModified.Labels).To(Equal(mapInjector))
+ })
+
+ // TODO: not currently passing
+ It("should correctly remove the values in the passed object", func(ctx SpecContext) {
+ mapInjector := map[string]string{"test": "test"}
+ f := newFakeLifecycleClient(capabilities, mapInjector, nil, nil)
+ f.set(&d.plugins[0])
+
+ pod := &corev1.Pod{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: "v1",
+ Kind: "Pod",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Labels: map[string]string{
+ "test": "test",
+ "other": "stuff",
+ },
+ },
+ }
+ obj, err := d.LifecycleHook(ctx, plugin.OperationVerbDelete, clusterObj, pod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(obj).ToNot(BeNil())
+ podModified, ok := obj.(*corev1.Pod)
+ Expect(ok).To(BeTrue())
+ Expect(podModified.Labels).To(Equal(map[string]string{"other": "stuff"}))
+ })
+})
+
+func createJSONPatchForLabels(originalInstance, instance *corev1.Pod) ([]byte, error) {
+ type patch []struct {
+ Op string `json:"op"`
+ Path string `json:"path"`
+ Value any `json:"value"`
+ }
+
+ op := "replace"
+ if len(originalInstance.Labels) == 0 {
+ op = "add"
+ }
+ p := patch{
+ {
+ Op: op,
+ Path: "/metadata/labels",
+ Value: instance.Labels,
+ },
+ }
+
+ return json.Marshal(p)
+}
diff --git a/internal/cnpi/plugin/client/reconciler.go b/internal/cnpi/plugin/client/reconciler.go
new file mode 100644
index 0000000000..5d9e096849
--- /dev/null
+++ b/internal/cnpi/plugin/client/reconciler.go
@@ -0,0 +1,169 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/reconciler"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+// newContinueResult returns a result instructing the reconciliation loop
+// to continue its operation
+func newContinueResult() ReconcilerHookResult { return ReconcilerHookResult{} }
+
+// newTerminateResult returns a result instructing the reconciliation loop to stop
+// reconciliation
+func newTerminateResult() ReconcilerHookResult { return ReconcilerHookResult{StopReconciliation: true} }
+
+// newReconcilerRequeueResult creates a new result instructing
+// a reconciler to schedule a loop in the passed time frame
+func newReconcilerRequeueResult(after int64) ReconcilerHookResult {
+ return ReconcilerHookResult{
+ Err: nil,
+ StopReconciliation: true,
+ Result: ctrl.Result{Requeue: true, RequeueAfter: time.Second * time.Duration(after)},
+ }
+}
+
+// newReconcilerErrorResult creates a new result from an error
+func newReconcilerErrorResult(err error) ReconcilerHookResult {
+ return ReconcilerHookResult{Err: err, StopReconciliation: true}
+}
+
+func (data *data) PreReconcile(ctx context.Context, cluster client.Object, object client.Object) ReconcilerHookResult {
+ return reconcilerHook(
+ ctx,
+ cluster,
+ object,
+ data.plugins,
+ func(
+ ctx context.Context,
+ plugin reconciler.ReconcilerHooksClient,
+ request *reconciler.ReconcilerHooksRequest,
+ ) (*reconciler.ReconcilerHooksResult, error) {
+ return plugin.Pre(ctx, request)
+ },
+ )
+}
+
+func (data *data) PostReconcile(ctx context.Context, cluster client.Object, object client.Object) ReconcilerHookResult {
+ return reconcilerHook(
+ ctx,
+ cluster,
+ object,
+ data.plugins,
+ func(
+ ctx context.Context,
+ plugin reconciler.ReconcilerHooksClient,
+ request *reconciler.ReconcilerHooksRequest,
+ ) (*reconciler.ReconcilerHooksResult, error) {
+ return plugin.Post(ctx, request)
+ },
+ )
+}
+
+type reconcilerHookFunc func(
+ ctx context.Context,
+ plugin reconciler.ReconcilerHooksClient,
+ request *reconciler.ReconcilerHooksRequest,
+) (*reconciler.ReconcilerHooksResult, error)
+
+// reconcilerHook serializes the cluster and the reconciled object, then
+// calls executeRequest (Pre or Post) on every plugin declaring the matching
+// reconciler capability, translating the plugin answer into a hook result.
+func reconcilerHook(
+	ctx context.Context,
+	cluster client.Object,
+	object client.Object,
+	plugins []pluginData,
+	executeRequest reconcilerHookFunc,
+) ReconcilerHookResult {
+	contextLogger := log.FromContext(ctx)
+
+	serializedCluster, err := json.Marshal(cluster)
+	if err != nil {
+		return newReconcilerErrorResult(
+			fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+				cluster.GetObjectKind().GroupVersionKind().Kind,
+				cluster.GetNamespace(), cluster.GetName(),
+				err,
+			),
+		)
+	}
+
+	serializedObject, err := json.Marshal(object)
+	if err != nil {
+		return newReconcilerErrorResult(
+			fmt.Errorf(
+				"while serializing %s %s/%s to JSON: %w",
+				object.GetObjectKind().GroupVersionKind().Kind,
+				object.GetNamespace(), object.GetName(),
+				err,
+			),
+		)
+	}
+
+	request := &reconciler.ReconcilerHooksRequest{
+		ClusterDefinition: serializedCluster,
+		ResourceDefinition: serializedObject,
+	}
+
+	// The hook kind is selected from the kind of the reconciled top-level
+	// resource (the "cluster" argument), not from the dependent object
+	var kind reconciler.ReconcilerHooksCapability_Kind
+	switch cluster.GetObjectKind().GroupVersionKind().Kind {
+	case "Cluster":
+		kind = reconciler.ReconcilerHooksCapability_KIND_CLUSTER
+	case "Backup":
+		kind = reconciler.ReconcilerHooksCapability_KIND_BACKUP
+	default:
+		contextLogger.Info(
+			"Skipping reconciler hooks for unknown group",
+			"clusterGvk", cluster.GetObjectKind())
+		return newContinueResult()
+	}
+
+	for idx := range plugins {
+		plugin := &plugins[idx]
+
+		if !slices.Contains(plugin.reconcilerCapabilities, kind) {
+			continue
+		}
+
+		result, err := executeRequest(ctx, plugin.reconcilerHooksClient, request)
+		if err != nil {
+			return newReconcilerErrorResult(err)
+		}
+
+		switch result.Behavior {
+		case reconciler.ReconcilerHooksResult_BEHAVIOR_TERMINATE:
+			return newTerminateResult()
+
+		case reconciler.ReconcilerHooksResult_BEHAVIOR_REQUEUE:
+			return newReconcilerRequeueResult(result.GetRequeueAfter())
+
+		case reconciler.ReconcilerHooksResult_BEHAVIOR_CONTINUE:
+			return newContinueResult()
+		}
+	}
+
+	return newContinueResult()
+}
diff --git a/internal/cnpi/plugin/client/suite_test.go b/internal/cnpi/plugin/client/suite_test.go
new file mode 100644
index 0000000000..78b821c239
--- /dev/null
+++ b/internal/cnpi/plugin/client/suite_test.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestClient(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Client Suite")
+}
diff --git a/internal/cnpi/plugin/client/wal.go b/internal/cnpi/plugin/client/wal.go
new file mode 100644
index 0000000000..ce5e91d3c9
--- /dev/null
+++ b/internal/cnpi/plugin/client/wal.go
@@ -0,0 +1,127 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/wal"
+ "go.uber.org/multierr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+)
+
+func (data *data) ArchiveWAL(
+ ctx context.Context,
+ cluster client.Object,
+ sourceFileName string,
+) error {
+ contextLogger := log.FromContext(ctx)
+
+ serializedCluster, err := json.Marshal(cluster)
+ if err != nil {
+ return fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ cluster.GetObjectKind().GroupVersionKind().Kind,
+ cluster.GetNamespace(), cluster.GetName(),
+ err,
+ )
+ }
+
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if !slices.Contains(plugin.walCapabilities, wal.WALCapability_RPC_TYPE_ARCHIVE_WAL) {
+ continue
+ }
+
+ contextLogger := contextLogger.WithValues(
+ "pluginName", plugin.name,
+ )
+ request := wal.WALArchiveRequest{
+ ClusterDefinition: serializedCluster,
+ SourceFileName: sourceFileName,
+ }
+
+ contextLogger.Trace(
+ "Calling ArchiveWAL endpoint",
+ "clusterDefinition", request.ClusterDefinition,
+ "sourceFile", request.SourceFileName)
+ _, err := plugin.walClient.Archive(ctx, &request)
+ if err != nil {
+ contextLogger.Error(err, "Error while calling ArchiveWAL, failing")
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (data *data) RestoreWAL(
+ ctx context.Context,
+ cluster client.Object,
+ sourceWALName string,
+ destinationFileName string,
+) error {
+ var errorCollector error
+
+ contextLogger := log.FromContext(ctx)
+
+ serializedCluster, err := json.Marshal(cluster)
+ if err != nil {
+ return fmt.Errorf("while serializing %s %s/%s to JSON: %w",
+ cluster.GetObjectKind().GroupVersionKind().Kind,
+ cluster.GetNamespace(), cluster.GetName(),
+ err,
+ )
+ }
+
+ for idx := range data.plugins {
+ plugin := &data.plugins[idx]
+
+ if !slices.Contains(plugin.walCapabilities, wal.WALCapability_RPC_TYPE_RESTORE_WAL) {
+ continue
+ }
+
+ contextLogger := contextLogger.WithValues(
+ "pluginName", plugin.name,
+ )
+ request := wal.WALRestoreRequest{
+ ClusterDefinition: serializedCluster,
+ SourceWalName: sourceWALName,
+ DestinationFileName: destinationFileName,
+ }
+
+ contextLogger.Trace(
+ "Calling RestoreWAL endpoint",
+ "clusterDefinition", request.ClusterDefinition,
+ "sourceWALName", sourceWALName,
+ "destinationFileName", destinationFileName,
+ )
+ if _, err := plugin.walClient.Restore(ctx, &request); err != nil {
+ contextLogger.Trace("WAL restore via plugin failed, trying next one", "err", err)
+ errorCollector = multierr.Append(errorCollector, err)
+ } else {
+ return nil
+ }
+ }
+
+ return errorCollector
+}
diff --git a/internal/cnpi/plugin/doc.go b/internal/cnpi/plugin/doc.go
new file mode 100644
index 0000000000..c642a90b43
--- /dev/null
+++ b/internal/cnpi/plugin/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package plugin contains the logics that acts as bridge between cnpg-i and the operator
+package plugin
diff --git a/internal/cnpi/plugin/mapping.go b/internal/cnpi/plugin/mapping.go
new file mode 100644
index 0000000000..7fc5b67613
--- /dev/null
+++ b/internal/cnpi/plugin/mapping.go
@@ -0,0 +1,51 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+ "fmt"
+
+ "github.com/cloudnative-pg/cnpg-i/pkg/lifecycle"
+)
+
+// The OperationVerb corresponds to the Kubernetes API method
+type OperationVerb string
+
+// A Kubernetes operation verb
+const (
+ OperationVerbPatch OperationVerb = "PATCH"
+ OperationVerbUpdate OperationVerb = "UPDATE"
+ OperationVerbCreate OperationVerb = "CREATE"
+ OperationVerbDelete OperationVerb = "DELETE"
+)
+
+// ToOperationType_Type converts an OperationVerb into a lifecycle.OperatorOperationType_Type
+// nolint: revive,stylecheck
+func (o OperationVerb) ToOperationType_Type() (lifecycle.OperatorOperationType_Type, error) {
+ switch o {
+ case OperationVerbPatch:
+ return lifecycle.OperatorOperationType_TYPE_PATCH, nil
+ case OperationVerbDelete:
+ return lifecycle.OperatorOperationType_TYPE_DELETE, nil
+ case OperationVerbCreate:
+ return lifecycle.OperatorOperationType_TYPE_CREATE, nil
+ case OperationVerbUpdate:
+ return lifecycle.OperatorOperationType_TYPE_UPDATE, nil
+ }
+
+ return lifecycle.OperatorOperationType_Type(0), fmt.Errorf("unknown operation type: '%s'", o)
+}
diff --git a/internal/cnpi/plugin/operatorclient/client.go b/internal/cnpi/plugin/operatorclient/client.go
new file mode 100644
index 0000000000..96a9878117
--- /dev/null
+++ b/internal/cnpi/plugin/operatorclient/client.go
@@ -0,0 +1,136 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operatorclient
+
+import (
+ "context"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
+ pluginclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+type extendedClient struct {
+ client.Client
+}
+
+// NewExtendedClient returns a client.Client capable of interacting with the plugin feature
+func NewExtendedClient(c client.Client) client.Client {
+ return &extendedClient{
+ Client: c,
+ }
+}
+
+func (e *extendedClient) invokePlugin(
+ ctx context.Context,
+ operationVerb plugin.OperationVerb,
+ obj client.Object,
+) (client.Object, error) {
+ contextLogger := log.FromContext(ctx).WithName("invokePlugin")
+
+ cluster, ok := ctx.Value(utils.ContextKeyCluster).(client.Object)
+ if !ok || cluster == nil {
+ contextLogger.Trace("skipping invokePlugin, cannot find the cluster inside the context")
+ return obj, nil
+ }
+
+ loader, ok := cluster.(pluginclient.Loader)
+ if !ok {
+ contextLogger.Trace("skipping invokePlugin, cluster does not adhere to Loader interface")
+ return obj, nil
+ }
+
+ pClient, err := loader.LoadPluginClient(ctx)
+ if err != nil {
+ contextLogger.Trace("skipping invokePlugin, cannot load the plugin client")
+ return obj, nil
+ }
+
+ contextLogger.Debug("correctly loaded the plugin client")
+
+ return pClient.LifecycleHook(ctx, operationVerb, cluster, obj)
+}
+
+// Create saves the object obj in the Kubernetes cluster. obj must be a
+// struct pointer so that obj can be updated with the content returned by the Server.
+func (e *extendedClient) Create(
+ ctx context.Context,
+ obj client.Object,
+ opts ...client.CreateOption,
+) error {
+ var err error
+ obj, err = e.invokePlugin(ctx, plugin.OperationVerbCreate, obj)
+ if err != nil {
+ return err
+ }
+ return e.Client.Create(ctx, obj, opts...)
+}
+
+// Delete deletes the given obj from Kubernetes cluster.
+func (e *extendedClient) Delete(
+ ctx context.Context,
+ obj client.Object,
+ opts ...client.DeleteOption,
+) error {
+ contextLogger := log.FromContext(ctx).WithName("extended_client_delete")
+
+ origObj := obj.DeepCopyObject().(client.Object)
+ var err error
+ obj, err = e.invokePlugin(ctx, plugin.OperationVerbDelete, obj)
+ if err != nil {
+ return err
+ }
+ if err := e.Client.Patch(ctx, obj, client.MergeFrom(origObj)); err != nil {
+ contextLogger.Error(err, "while patching before delete")
+ return err
+ }
+ return e.Client.Delete(ctx, obj, opts...)
+}
+
+// Update updates the given obj in the Kubernetes cluster. obj must be a
+// struct pointer so that obj can be updated with the content returned by the Server.
+func (e *extendedClient) Update(
+ ctx context.Context,
+ obj client.Object,
+ opts ...client.UpdateOption,
+) error {
+ var err error
+ obj, err = e.invokePlugin(ctx, plugin.OperationVerbUpdate, obj)
+ if err != nil {
+ return err
+ }
+ return e.Client.Update(ctx, obj, opts...)
+}
+
+// Patch patches the given obj in the Kubernetes cluster. obj must be a
+// struct pointer so that obj can be updated with the content returned by the Server.
+func (e *extendedClient) Patch(
+ ctx context.Context,
+ obj client.Object,
+ patch client.Patch,
+ opts ...client.PatchOption,
+) error {
+ var err error
+ obj, err = e.invokePlugin(ctx, plugin.OperationVerbPatch, obj)
+ if err != nil {
+ return err
+ }
+ return e.Client.Patch(ctx, obj, patch, opts...)
+}
diff --git a/internal/cnpi/plugin/operatorclient/client_test.go b/internal/cnpi/plugin/operatorclient/client_test.go
new file mode 100644
index 0000000000..e4aa67e354
--- /dev/null
+++ b/internal/cnpi/plugin/operatorclient/client_test.go
@@ -0,0 +1,95 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operatorclient
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin"
+ pluginclient "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/client"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+type fakeClusterCRD struct {
+ k8client.Object
+ pluginClient *fakePluginClient
+}
+
+func (f *fakeClusterCRD) LoadPluginClient(_ context.Context) (pluginclient.Client, error) {
+ return f.pluginClient, nil
+}
+
+type fakePluginClient struct {
+ pluginclient.Client
+ injectLabels map[string]string
+}
+
+func (f fakePluginClient) LifecycleHook(
+ _ context.Context,
+ _ plugin.OperationVerb,
+ _ k8client.Object,
+ object k8client.Object,
+) (k8client.Object, error) {
+ object.SetLabels(f.injectLabels)
+ return object, nil
+}
+
+var _ = Describe("extendedClient", func() {
+ var (
+ c *extendedClient
+ expectedLabels map[string]string
+ pluginClient *fakePluginClient
+ )
+
+ BeforeEach(func() {
+ c = &extendedClient{
+ Client: fake.NewClientBuilder().WithScheme(scheme.BuildWithAllKnownScheme()).Build(),
+ }
+ expectedLabels = map[string]string{"lifecycle": "true"}
+ pluginClient = &fakePluginClient{
+ injectLabels: expectedLabels,
+ }
+ })
+
+ It("invokePlugin", func(ctx SpecContext) {
+ fakeCrd := &fakeClusterCRD{
+ pluginClient: pluginClient,
+ }
+ newCtx := context.WithValue(ctx, utils.ContextKeyCluster, fakeCrd)
+ By("ensuring it works the first invocation", func() {
+ obj, err := c.invokePlugin(newCtx, plugin.OperationVerbCreate, &corev1.Pod{})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(obj.GetLabels()).To(Equal(expectedLabels))
+ })
+
+ By("ensuring it maintains the reference for subsequent invocations", func() {
+ newLabels := map[string]string{"test": "test"}
+ pluginClient.injectLabels = newLabels
+ obj, err := c.invokePlugin(newCtx, plugin.OperationVerbCreate, &corev1.Pod{})
+ Expect(err).ToNot(HaveOccurred())
+ Expect(obj.GetLabels()).To(Equal(newLabels))
+ })
+ })
+})
diff --git a/internal/cnpi/plugin/operatorclient/doc.go b/internal/cnpi/plugin/operatorclient/doc.go
new file mode 100644
index 0000000000..89ddcf2ee0
--- /dev/null
+++ b/internal/cnpi/plugin/operatorclient/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package operatorclient contains an extended kubernetes client that supports plugin API calls
+package operatorclient
diff --git a/internal/cnpi/plugin/operatorclient/suite_test.go b/internal/cnpi/plugin/operatorclient/suite_test.go
new file mode 100644
index 0000000000..52a34e0df9
--- /dev/null
+++ b/internal/cnpi/plugin/operatorclient/suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package operatorclient
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestOperatorClient(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Operatorclient Suite")
+}
diff --git a/internal/configuration/configuration.go b/internal/configuration/configuration.go
index 1bcdfb64d7..8e6721e304 100644
--- a/internal/configuration/configuration.go
+++ b/internal/configuration/configuration.go
@@ -29,8 +29,19 @@ import (
var configurationLog = log.WithName("configuration")
-// DefaultOperatorPullSecretName is implicitly copied into newly created clusters.
-const DefaultOperatorPullSecretName = "cnpg-pull-secret" // #nosec
+const (
+ // DefaultOperatorPullSecretName is implicitly copied into newly created clusters.
+ DefaultOperatorPullSecretName = "cnpg-pull-secret" // #nosec
+
+ // CertificateDuration is the default value for the lifetime of the generated certificates
+ CertificateDuration = 90
+
+ // ExpiringCheckThreshold is the default threshold to consider a certificate as expiring
+ ExpiringCheckThreshold = 7
+)
+
+// DefaultPluginSocketDir is the default directory where the plugin sockets are located.
+const DefaultPluginSocketDir = "/plugins"
// Data is the struct containing the configuration of the operator.
// Usually the operator code will use the "Current" configuration.
@@ -39,6 +50,10 @@ type Data struct {
// need to written. This is different between plain Kubernetes and OpenShift
WebhookCertDir string `json:"webhookCertDir" env:"WEBHOOK_CERT_DIR"`
+ // PluginSocketDir is the directory where the plugins sockets are to be
+ // found
+ PluginSocketDir string `json:"pluginSocketDir" env:"PLUGIN_SOCKET_DIR"`
+
// WatchNamespace is the namespace where the operator should watch and
// is configurable via environment variables in the OpenShift console.
// Multiple namespaces can be specified separated by comma
@@ -85,6 +100,12 @@ type Data struct {
// EnablePodDebugging enable debugging mode in new generated pods
EnablePodDebugging bool `json:"enablePodDebugging" env:"POD_DEBUG"`
+ // This is the lifetime of the generated certificates
+ CertificateDuration int `json:"certificateDuration" env:"CERTIFICATE_DURATION"`
+
+ // Threshold to consider a certificate as expiring
+ ExpiringCheckThreshold int `json:"expiringCheckThreshold" env:"EXPIRING_CHECK_THRESHOLD"`
+
// CreateAnyService is true when the user wants the operator to create
// the -any service. Defaults to false.
CreateAnyService bool `json:"createAnyService" env:"CREATE_ANY_SERVICE"`
@@ -99,7 +120,10 @@ func newDefaultConfig() *Data {
OperatorPullSecretName: DefaultOperatorPullSecretName,
OperatorImageName: versions.DefaultOperatorImageName,
PostgresImageName: versions.DefaultImageName,
+ PluginSocketDir: DefaultPluginSocketDir,
CreateAnyService: false,
+ CertificateDuration: CertificateDuration,
+ ExpiringCheckThreshold: ExpiringCheckThreshold,
}
}
diff --git a/internal/management/controller/instance_controller.go b/internal/management/controller/instance_controller.go
index 440786334e..cd17dd14a1 100644
--- a/internal/management/controller/instance_controller.go
+++ b/internal/management/controller/instance_controller.go
@@ -58,8 +58,9 @@ import (
)
const (
- userSearchFunctionName = "user_search"
- userSearchFunction = "SELECT usename, passwd FROM pg_shadow WHERE usename=$1;"
+ userSearchFunctionSchema = "public"
+ userSearchFunctionName = "user_search"
+ userSearchFunction = "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1;"
)
// RetryUntilWalReceiverDown is the default retry configuration that is used
@@ -658,20 +659,23 @@ func (r *InstanceReconciler) reconcilePoolers(
return err
}
if !existsFunction {
- _, err = tx.Exec(fmt.Sprintf("CREATE OR REPLACE FUNCTION %s(uname TEXT) "+
+ _, err = tx.Exec(fmt.Sprintf("CREATE OR REPLACE FUNCTION %s.%s(uname TEXT) "+
"RETURNS TABLE (usename name, passwd text) "+
"as '%s' "+
"LANGUAGE sql SECURITY DEFINER",
+ userSearchFunctionSchema,
userSearchFunctionName,
userSearchFunction))
if err != nil {
return err
}
- _, err = tx.Exec(fmt.Sprintf("REVOKE ALL ON FUNCTION %s(text) FROM public;", userSearchFunctionName))
+ _, err = tx.Exec(fmt.Sprintf("REVOKE ALL ON FUNCTION %s.%s(text) FROM public;",
+ userSearchFunctionSchema, userSearchFunctionName))
if err != nil {
return err
}
- _, err = tx.Exec(fmt.Sprintf("GRANT EXECUTE ON FUNCTION %s(text) TO %s",
+ _, err = tx.Exec(fmt.Sprintf("GRANT EXECUTE ON FUNCTION %s.%s(text) TO %s",
+ userSearchFunctionSchema,
userSearchFunctionName,
apiv1.PGBouncerPoolerUserName))
if err != nil {
diff --git a/internal/management/controller/roles/postgres.go b/internal/management/controller/roles/postgres.go
index e87c912145..47050f4f30 100644
--- a/internal/management/controller/roles/postgres.go
+++ b/internal/management/controller/roles/postgres.go
@@ -362,9 +362,7 @@ func appendRoleOptions(role DatabaseRole, query *strings.Builder) {
query.WriteString(" NOSUPERUSER")
}
- if role.ConnectionLimit > -1 {
- query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit))
- }
+ query.WriteString(fmt.Sprintf(" CONNECTION LIMIT %d", role.ConnectionLimit))
}
func appendPasswordOption(role DatabaseRole,
diff --git a/internal/management/controller/roles/postgres_test.go b/internal/management/controller/roles/postgres_test.go
index e3b878f27b..d003af03f3 100644
--- a/internal/management/controller/roles/postgres_test.go
+++ b/internal/management/controller/roles/postgres_test.go
@@ -95,6 +95,10 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
InRoles: []string{"pg_monitoring"},
DisablePassword: true,
}
+ wantedRoleWithDefaultConnectionLimit := apiv1.RoleConfiguration{
+ Name: "foo",
+ ConnectionLimit: -1,
+ }
unWantedRole := apiv1.RoleConfiguration{
Name: "foo",
}
@@ -117,6 +121,10 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
"CREATE ROLE \"%s\" BYPASSRLS NOCREATEDB CREATEROLE NOINHERIT LOGIN NOREPLICATION "+
"NOSUPERUSER CONNECTION LIMIT 2 IN ROLE pg_monitoring PASSWORD NULL VALID UNTIL '2100-01-01 00:00:00Z'",
wantedRole.Name)
+ wantedRoleWithDefaultConnectionLimitExpectedCrtStmt := fmt.Sprintf(
+ "CREATE ROLE \"%s\" NOBYPASSRLS NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION "+
+ "NOSUPERUSER CONNECTION LIMIT -1",
+ wantedRoleWithDefaultConnectionLimit.Name)
wantedRoleCommentStmt := fmt.Sprintf(
"COMMENT ON ROLE \"%s\" IS %s",
@@ -309,6 +317,18 @@ var _ = Describe("Postgres RoleManager implementation test", func() {
roleConfigurationAdapter{RoleConfiguration: wantedRoleWithPassDeletion}.toDatabaseRole())
Expect(err).ShouldNot(HaveOccurred())
})
+ It("Create will send a correct CREATE with the default connection limit to the DB", func(ctx context.Context) {
+ db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
+ Expect(err).ToNot(HaveOccurred())
+ prm := NewPostgresRoleManager(db)
+
+ mock.ExpectExec(wantedRoleWithDefaultConnectionLimitExpectedCrtStmt).
+ WillReturnResult(sqlmock.NewResult(2, 3))
+
+ err = prm.Create(ctx,
+ roleConfigurationAdapter{RoleConfiguration: wantedRoleWithDefaultConnectionLimit}.toDatabaseRole())
+ Expect(err).ShouldNot(HaveOccurred())
+ })
// Testing Delete
It("Delete will send a correct DROP to the DB", func(ctx context.Context) {
db, mock, err := sqlmock.New(sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual))
diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go
index 1020fabe45..355d597afc 100644
--- a/pkg/certs/certs.go
+++ b/pkg/certs/certs.go
@@ -33,11 +33,11 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
)
const (
- // This is the lifetime of the generated certificates
- certificateDuration = 90 * 24 * time.Hour
// This is the PEM block type of elliptic courves private key
ecPrivateKeyPEMBlockType = "EC PRIVATE KEY"
@@ -45,9 +45,6 @@ const (
// This is the PEM block type for certificates
certificatePEMBlockType = "CERTIFICATE"
- // Threshold to consider a certificate as expiring
- expiringCheckThreshold = 7 * 24 * time.Hour
-
// CACertKey is the key for certificates in a CA secret
CACertKey = "ca.crt"
@@ -143,6 +140,7 @@ func (pair KeyPair) IsValid(caPair *KeyPair, opts *x509.VerifyOptions) error {
// CreateAndSignPair given a CA keypair, generate and sign a leaf keypair
func (pair KeyPair) CreateAndSignPair(host string, usage CertType, altDNSNames []string) (*KeyPair, error) {
+ certificateDuration := getCertificateDuration()
notBefore := time.Now().Add(time.Minute * -5)
notAfter := notBefore.Add(certificateDuration)
return pair.createAndSignPairWithValidity(host, notBefore, notAfter, usage, altDNSNames)
@@ -267,6 +265,7 @@ func (pair *KeyPair) RenewCertificate(caPrivateKey *ecdsa.PrivateKey, parentCert
return err
}
+ certificateDuration := getCertificateDuration()
notBefore := time.Now().Add(time.Minute * -5)
notAfter := notBefore.Add(certificateDuration)
@@ -314,6 +313,7 @@ func (pair *KeyPair) IsExpiring() (bool, *time.Time, error) {
if time.Now().Before(cert.NotBefore) {
return true, &cert.NotAfter, nil
}
+ expiringCheckThreshold := getCheckThreshold()
if time.Now().Add(expiringCheckThreshold).After(cert.NotAfter) {
return true, &cert.NotAfter, nil
}
@@ -334,6 +334,7 @@ func (pair *KeyPair) CreateDerivedCA(commonName string, organizationalUnit strin
return nil, err
}
+ certificateDuration := getCertificateDuration()
notBefore := time.Now().Add(time.Minute * -5)
notAfter := notBefore.Add(certificateDuration)
@@ -342,6 +343,7 @@ func (pair *KeyPair) CreateDerivedCA(commonName string, organizationalUnit strin
// CreateRootCA generates a CA returning its keys
func CreateRootCA(commonName string, organizationalUnit string) (*KeyPair, error) {
+ certificateDuration := getCertificateDuration()
notBefore := time.Now().Add(time.Minute * -5)
notAfter := notBefore.Add(certificateDuration)
return createCAWithValidity(notBefore, notAfter, nil, nil, commonName, organizationalUnit)
@@ -466,3 +468,19 @@ func encodeCertificate(derBytes []byte) []byte {
func encodePrivateKey(derBytes []byte) []byte {
return pem.EncodeToMemory(&pem.Block{Type: ecPrivateKeyPEMBlockType, Bytes: derBytes})
}
+
+func getCertificateDuration() time.Duration {
+ duration := configuration.Current.CertificateDuration
+ if duration <= 0 {
+ return configuration.CertificateDuration * 24 * time.Hour
+ }
+ return time.Duration(duration) * 24 * time.Hour
+}
+
+func getCheckThreshold() time.Duration {
+ threshold := configuration.Current.ExpiringCheckThreshold
+ if threshold <= 0 {
+ return configuration.ExpiringCheckThreshold * 24 * time.Hour
+ }
+ return time.Duration(threshold) * 24 * time.Hour
+}
diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go
index 927dca8b3e..18caf12e23 100644
--- a/pkg/certs/certs_test.go
+++ b/pkg/certs/certs_test.go
@@ -22,6 +22,8 @@ import (
"encoding/pem"
"time"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/configuration"
+
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
)
@@ -278,3 +280,47 @@ var _ = Describe("Keypair generation", func() {
})
})
})
+
+var _ = Describe("Certificate duration and expiration threshold", func() {
+ defaultCertificateDuration := configuration.CertificateDuration * 24 * time.Hour
+ defaultExpiringThreshold := configuration.ExpiringCheckThreshold * 24 * time.Hour
+ tenDays := 10 * 24 * time.Hour
+
+ It("returns the default duration", func() {
+ duration := getCertificateDuration()
+ Expect(duration).To(BeEquivalentTo(defaultCertificateDuration))
+ })
+
+ It("returns the default duration if the configuration is a negative value", func() {
+ configuration.Current = configuration.NewConfiguration()
+ configuration.Current.CertificateDuration = -1
+ duration := getCertificateDuration()
+ Expect(duration).To(BeEquivalentTo(defaultCertificateDuration))
+ })
+
+ It("returns a valid duration of 10 days", func() {
+ configuration.Current = configuration.NewConfiguration()
+ configuration.Current.CertificateDuration = 10
+ duration := getCertificateDuration()
+ Expect(duration).To(BeEquivalentTo(tenDays))
+ })
+
+ It("returns the default check threshold", func() {
+ threshold := getCheckThreshold()
+ Expect(threshold).To(BeEquivalentTo(defaultExpiringThreshold))
+ })
+
+ It("returns the default check threshold if the configuration is a negative value", func() {
+ configuration.Current = configuration.NewConfiguration()
+ configuration.Current.ExpiringCheckThreshold = -1
+ threshold := getCheckThreshold()
+ Expect(threshold).To(BeEquivalentTo(defaultExpiringThreshold))
+ })
+
+ It("returns a valid threshold of 10 days", func() {
+ configuration.Current = configuration.NewConfiguration()
+ configuration.Current.ExpiringCheckThreshold = 10
+ threshold := getCheckThreshold()
+ Expect(threshold).To(BeEquivalentTo(tenDays))
+ })
+})
diff --git a/pkg/configparser/configparser.go b/pkg/configparser/configparser.go
index f7698a3961..9c736232c1 100644
--- a/pkg/configparser/configparser.go
+++ b/pkg/configparser/configparser.go
@@ -82,6 +82,9 @@ func ReadConfigMap(target interface{}, defaults interface{}, data map[string]str
case reflect.Bool:
value = strconv.FormatBool(valueField.Bool())
+ case reflect.Int:
+ value = fmt.Sprintf("%v", valueField.Int())
+
case reflect.Slice:
if valueField.Type().Elem().Kind() != reflect.String {
configparserLog.Info(
@@ -113,6 +116,15 @@ func ReadConfigMap(target interface{}, defaults interface{}, data map[string]str
continue
}
reflect.ValueOf(target).Elem().FieldByName(field.Name).SetBool(boolValue)
+ case reflect.Int:
+ intValue, err := strconv.ParseInt(value, 10, 0)
+ if err != nil {
+ configparserLog.Info(
+ "Skipping invalid integer value parsing configuration",
+ "field", field.Name, "value", value)
+ continue
+ }
+ reflect.ValueOf(target).Elem().FieldByName(field.Name).SetInt(intValue)
case reflect.String:
reflect.ValueOf(target).Elem().FieldByName(field.Name).SetString(value)
case reflect.Slice:
diff --git a/pkg/configparser/configparser_test.go b/pkg/configparser/configparser_test.go
index 98b00df0ba..fb80c6ad52 100644
--- a/pkg/configparser/configparser_test.go
+++ b/pkg/configparser/configparser_test.go
@@ -36,6 +36,12 @@ type FakeData struct {
// the owning Cluster
InheritedLabels []string `json:"inheritedLabels" env:"INHERITED_LABELS"`
+ // This is the lifetime of the generated certificates
+ CertificateDuration int `json:"certificateDuration" env:"CERTIFICATE_DURATION"`
+
+ // Threshold to consider a certificate as expiring
+ ExpiringCheckThreshold int `json:"expiringCheckThreshold" env:"EXPIRING_CHECK_THRESHOLD"`
+
// EnablePodDebugging enable debugging mode in new generated pods
EnablePodDebugging bool `json:"enablePodDebugging" env:"POD_DEBUG"`
}
@@ -74,14 +80,34 @@ var _ = Describe("Data test suite", func() {
It("loads values from environment", func() {
config := &FakeData{}
fakeEnv := NewFakeEnvironment(map[string]string{
- "WATCH_NAMESPACE": oneNamespace,
- "INHERITED_ANNOTATIONS": "one, two",
- "INHERITED_LABELS": "alpha, beta",
+ "WATCH_NAMESPACE": oneNamespace,
+ "INHERITED_ANNOTATIONS": "one, two",
+ "INHERITED_LABELS": "alpha, beta",
+ "EXPIRING_CHECK_THRESHOLD": "2",
})
config.readConfigMap(nil, fakeEnv)
Expect(config.WatchNamespace).To(Equal(oneNamespace))
Expect(config.InheritedAnnotations).To(Equal([]string{"one", "two"}))
Expect(config.InheritedLabels).To(Equal([]string{"alpha", "beta"}))
+ Expect(config.ExpiringCheckThreshold).To(Equal(2))
+ })
+
+ It("resets to the default value if the format is not correct", func() {
+ config := &FakeData{
+ CertificateDuration: 90,
+ ExpiringCheckThreshold: 7,
+ }
+ fakeEnv := NewFakeEnvironment(map[string]string{
+ "EXPIRING_CHECK_THRESHOLD": "3600min",
+ "CERTIFICATE_DURATION": "unknown",
+ })
+ defaultData := &FakeData{
+ CertificateDuration: 90,
+ ExpiringCheckThreshold: 7,
+ }
+ ReadConfigMap(config, defaultData, nil, fakeEnv)
+ Expect(config.ExpiringCheckThreshold).To(Equal(7))
+ Expect(config.CertificateDuration).To(Equal(90))
})
It("handles correctly default values of slices", func() {
diff --git a/pkg/management/external/external.go b/pkg/management/external/external.go
index 7da35ef2b9..dbcada27c6 100644
--- a/pkg/management/external/external.go
+++ b/pkg/management/external/external.go
@@ -26,6 +26,37 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/configfile"
)
+// GetServerConnectionString gets the connection string to be
+// used to connect to this external server, without dumping
+// the required cryptographic material
+func GetServerConnectionString(
+ server *apiv1.ExternalCluster,
+) string {
+ connectionParameters := maps.Clone(server.ConnectionParameters)
+
+ if server.SSLCert != nil {
+ name := getSecretKeyRefFileName(server.Name, server.SSLCert)
+ connectionParameters["sslcert"] = name
+ }
+
+ if server.SSLKey != nil {
+ name := getSecretKeyRefFileName(server.Name, server.SSLKey)
+ connectionParameters["sslkey"] = name
+ }
+
+ if server.SSLRootCert != nil {
+ name := getSecretKeyRefFileName(server.Name, server.SSLRootCert)
+ connectionParameters["sslrootcert"] = name
+ }
+
+ if server.Password != nil {
+ pgpassfile := getPgPassFilePath(server.Name)
+ connectionParameters["passfile"] = pgpassfile
+ }
+
+ return configfile.CreateConnectionString(connectionParameters)
+}
+
// ConfigureConnectionToServer creates a connection string to the external
// server, using the configuration inside the cluster and dumping the secret when
// needed in a custom passfile.
diff --git a/pkg/management/external/utils.go b/pkg/management/external/utils.go
index c8116787e5..959b2a2cfc 100644
--- a/pkg/management/external/utils.go
+++ b/pkg/management/external/utils.go
@@ -68,6 +68,17 @@ func readSecretKeyRef(
return string(value), err
}
+// getSecretKeyRefFileName get the name of the file where the content of the
+// connection secret will be dumped
+func getSecretKeyRefFileName(
+ serverName string,
+ selector *corev1.SecretKeySelector,
+) string {
+ directory := path.Join(getExternalSecretsPath(), serverName)
+ filePath := path.Join(directory, fmt.Sprintf("%v_%v", selector.Name, selector.Key))
+ return filePath
+}
+
// dumpSecretKeyRefToFile dumps a certain secret to a file inside a temporary folder
// using 0600 as permission bits.
//
@@ -112,6 +123,13 @@ func dumpSecretKeyRefToFile(
return f.Name(), nil
}
+// getPgPassFilePath gets the path where the pgpass file will be stored
+func getPgPassFilePath(serverName string) string {
+ directory := path.Join(getExternalSecretsPath(), serverName)
+ filePath := path.Join(directory, "pgpass")
+ return filePath
+}
+
// createPgPassFile creates a pgpass file inside the user home directory
func createPgPassFile(
serverName string,
diff --git a/pkg/management/postgres/backup.go b/pkg/management/postgres/backup.go
index 40e4730ea2..37de1e1502 100644
--- a/pkg/management/postgres/backup.go
+++ b/pkg/management/postgres/backup.go
@@ -69,8 +69,9 @@ type BackupCommand struct {
Capabilities *barmanCapabilities.Capabilities
}
-// NewBackupCommand initializes a BackupCommand object
-func NewBackupCommand(
+// NewBarmanBackupCommand initializes a BackupCommand object, taking a physical
+// backup using Barman Cloud
+func NewBarmanBackupCommand(
cluster *apiv1.Cluster,
backup *apiv1.Backup,
client client.Client,
@@ -250,16 +251,7 @@ func (b *BackupCommand) retryWithRefreshedCluster(
ctx context.Context,
cb func() error,
) error {
- return retry.OnError(retry.DefaultBackoff, resources.RetryAlways, func() error {
- if err := b.Client.Get(ctx, types.NamespacedName{
- Namespace: b.Cluster.Namespace,
- Name: b.Cluster.Name,
- }, b.Cluster); err != nil {
- return err
- }
-
- return cb()
- })
+ return resources.RetryWithRefreshedResource(ctx, b.Client, b.Cluster, cb)
}
// run executes the barman-cloud-backup command and updates the status
diff --git a/pkg/management/postgres/configuration.go b/pkg/management/postgres/configuration.go
index 5ebbddf063..fbb470b49f 100644
--- a/pkg/management/postgres/configuration.go
+++ b/pkg/management/postgres/configuration.go
@@ -32,6 +32,7 @@ import (
"github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/constants"
postgresutils "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
// InstallPgDataFileContent installs a file in PgData, returning true/false if
@@ -425,6 +426,7 @@ func createPostgresqlConfiguration(cluster *apiv1.Cluster, preserveUserSettings
IncludingSharedPreloadLibraries: true,
AdditionalSharedPreloadLibraries: cluster.Spec.PostgresConfiguration.AdditionalLibraries,
IsReplicaCluster: cluster.IsReplica(),
+ IsWalArchivingDisabled: utils.IsWalArchivingDisabled(&cluster.ObjectMeta),
}
if preserveUserSettings {
diff --git a/pkg/management/postgres/webserver/local.go b/pkg/management/postgres/webserver/local.go
index 00a691e9ed..d08bc67ff9 100644
--- a/pkg/management/postgres/webserver/local.go
+++ b/pkg/management/postgres/webserver/local.go
@@ -161,38 +161,72 @@ func (ws *localWebserverEndpoints) requestBackup(w http.ResponseWriter, r *http.
return
}
- if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
- http.Error(w, "Backup not configured in the cluster", http.StatusConflict)
- return
+ switch backup.Spec.Method {
+ case apiv1.BackupMethodBarmanObjectStore:
+ if cluster.Spec.Backup == nil || cluster.Spec.Backup.BarmanObjectStore == nil {
+ http.Error(w, "Barman backup not configured in the cluster", http.StatusConflict)
+ return
+ }
+
+ if err := ws.startBarmanBackup(ctx, &cluster, &backup); err != nil {
+ http.Error(
+ w,
+ fmt.Sprintf("error while requesting backup: %v", err.Error()),
+ http.StatusInternalServerError)
+ return
+ }
+ _, _ = fmt.Fprint(w, "OK")
+
+ case apiv1.BackupMethodPlugin:
+ if backup.Spec.PluginConfiguration.IsEmpty() {
+ http.Error(w, "Plugin backup not configured in the cluster", http.StatusConflict)
+ return
+ }
+
+ ws.startPluginBackup(ctx, &cluster, &backup)
+ _, _ = fmt.Fprint(w, "OK")
+
+ default:
+ http.Error(
+ w,
+ fmt.Sprintf("Unknown backup method: %v", backup.Spec.Method),
+ http.StatusBadRequest)
}
+}
+func (ws *localWebserverEndpoints) startBarmanBackup(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ backup *apiv1.Backup,
+) error {
backupLog := log.WithValues(
"backupName", backup.Name,
"backupNamespace", backup.Name)
- backupCommand, err := postgres.NewBackupCommand(
- &cluster,
- &backup,
+ backupCommand, err := postgres.NewBarmanBackupCommand(
+ cluster,
+ backup,
ws.typedClient,
ws.eventRecorder,
ws.instance,
backupLog,
)
if err != nil {
- http.Error(
- w,
- fmt.Sprintf("error while initializing backup: %v", err.Error()),
- http.StatusInternalServerError)
- return
+ return fmt.Errorf("while initializing backup: %w", err)
}
if err := backupCommand.Start(ctx); err != nil {
- http.Error(
- w,
- fmt.Sprintf("error while starting backup: %v", err.Error()),
- http.StatusInternalServerError)
- return
+ return fmt.Errorf("while starting backup: %w", err)
}
- _, _ = fmt.Fprint(w, "OK")
+ return nil
+}
+
+func (ws *localWebserverEndpoints) startPluginBackup(
+ ctx context.Context,
+ cluster *apiv1.Cluster,
+ backup *apiv1.Backup,
+) {
+ cmd := NewPluginBackupCommand(cluster, backup, ws.typedClient, ws.eventRecorder)
+ cmd.Start(ctx)
}
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
new file mode 100644
index 0000000000..02b3bbec7d
--- /dev/null
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -0,0 +1,167 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package webserver
+
+import (
+ "context"
+ "time"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/record"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/conditions"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/log"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/management/postgres"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/resources"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+)
+
+// PluginBackupCommand represent a backup command that is being executed
+type PluginBackupCommand struct {
+ Cluster *apiv1.Cluster
+ Backup *apiv1.Backup
+ Client client.Client
+ Recorder record.EventRecorder
+ Log log.Logger
+}
+
+// NewPluginBackupCommand initializes a PluginBackupCommand object, taking a
+// physical backup using a plugin
+func NewPluginBackupCommand(
+ cluster *apiv1.Cluster,
+ backup *apiv1.Backup,
+ client client.Client,
+ recorder record.EventRecorder,
+) *PluginBackupCommand {
+ logger := log.WithValues(
+ "pluginConfiguration", backup.Spec.PluginConfiguration,
+ "backupName", backup.Name,
+ "backupNamespace", backup.Namespace)
+ return &PluginBackupCommand{
+ Cluster: cluster,
+ Backup: backup,
+ Client: client,
+ Recorder: recorder,
+ Log: logger,
+ }
+}
+
+// Start starts a backup using the Plugin
+func (b *PluginBackupCommand) Start(ctx context.Context) {
+ go b.invokeStart(ctx)
+}
+
+func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
+ backupLog := b.Log.WithValues(
+ "backupName", b.Backup.Name,
+ "backupNamespace", b.Backup.Namespace)
+
+ cli, err := b.Cluster.LoadPluginClient(ctx)
+ if err != nil {
+ b.markBackupAsFailed(ctx, err)
+ return
+ }
+
+ // record the backup beginning
+ backupLog.Info("Plugin backup started")
+ b.Recorder.Event(b.Backup, "Normal", "Starting", "Backup started")
+
+ response, err := cli.Backup(
+ ctx,
+ b.Cluster,
+ b.Backup,
+ b.Backup.Spec.PluginConfiguration.Name,
+ b.Backup.Spec.PluginConfiguration.Parameters)
+ if err != nil {
+ b.markBackupAsFailed(ctx, err)
+ return
+ }
+
+ backupLog.Info("Backup completed")
+ b.Recorder.Event(b.Backup, "Normal", "Completed", "Backup completed")
+
+ // Set the status to completed
+ b.Backup.Status.SetAsCompleted()
+
+ // Fill the backup status from the plugin
+ // Note: the InstanceID field is set by the operator backup controller
+ b.Backup.Status.BackupID = response.BackupID
+ b.Backup.Status.BackupName = response.BackupName
+ b.Backup.Status.BeginWal = response.BeginWal
+ b.Backup.Status.EndWal = response.EndWal
+ b.Backup.Status.BeginLSN = response.BeginLsn
+ b.Backup.Status.EndLSN = response.EndLsn
+ b.Backup.Status.BackupLabelFile = response.BackupLabelFile
+ b.Backup.Status.TablespaceMapFile = response.TablespaceMapFile
+ b.Backup.Status.Online = ptr.To(response.Online)
+
+ if !response.StartedAt.IsZero() {
+ b.Backup.Status.StartedAt = ptr.To(metav1.NewTime(response.StartedAt))
+ }
+ if !response.StoppedAt.IsZero() {
+ b.Backup.Status.StoppedAt = ptr.To(metav1.NewTime(response.StoppedAt))
+ }
+
+ if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil {
+ backupLog.Error(err, "Can't set backup status as completed")
+ }
+
+ // Update backup status in cluster conditions on backup completion
+ if err := b.retryWithRefreshedCluster(ctx, func() error {
+ return conditions.Patch(ctx, b.Client, b.Cluster, apiv1.BackupSucceededCondition)
+ }); err != nil {
+ b.Log.Error(err, "Can't update the cluster with the completed backup data")
+ }
+}
+
+func (b *PluginBackupCommand) markBackupAsFailed(ctx context.Context, failure error) {
+ backupStatus := b.Backup.GetStatus()
+
+ // record the failure
+ b.Log.Error(failure, "Backup failed")
+ b.Recorder.Event(b.Backup, "Normal", "Failed", "Backup failed")
+
+ // update backup status as failed
+ backupStatus.SetAsFailed(failure)
+ if err := postgres.PatchBackupStatusAndRetry(ctx, b.Client, b.Backup); err != nil {
+ b.Log.Error(err, "Can't mark backup as failed")
+ // We do not terminate here because we still want to set the condition on the cluster.
+ }
+
+ // add backup failed condition to the cluster
+ if failErr := b.retryWithRefreshedCluster(ctx, func() error {
+ origCluster := b.Cluster.DeepCopy()
+
+ meta.SetStatusCondition(&b.Cluster.Status.Conditions, *apiv1.BuildClusterBackupFailedCondition(failure))
+
+ b.Cluster.Status.LastFailedBackup = utils.GetCurrentTimestampWithFormat(time.RFC3339)
+ return b.Client.Status().Patch(ctx, b.Cluster, client.MergeFrom(origCluster))
+ }); failErr != nil {
+ b.Log.Error(failErr, "while setting cluster condition for failed backup")
+ }
+}
+
+func (b *PluginBackupCommand) retryWithRefreshedCluster(
+ ctx context.Context,
+ cb func() error,
+) error {
+ return resources.RetryWithRefreshedResource(ctx, b.Client, b.Cluster, cb)
+}
diff --git a/pkg/postgres/configuration.go b/pkg/postgres/configuration.go
index 0115dab36d..6a44e4aa89 100644
--- a/pkg/postgres/configuration.go
+++ b/pkg/postgres/configuration.go
@@ -28,8 +28,14 @@ import (
// WalLevelValue a value that is assigned to the 'wal_level' configuration field
type WalLevelValue string
-// WalLevelParameter the configuration key containing the wal_level value
-const WalLevelParameter = "wal_level"
+// ParameterWalLevel the configuration key containing the wal_level value
+const ParameterWalLevel = "wal_level"
+
+// ParameterMaxWalSenders the configuration key containing the max_wal_senders value
+const ParameterMaxWalSenders = "max_wal_senders"
+
+// ParameterArchiveMode the configuration key containing the archive_mode value
+const ParameterArchiveMode = "archive_mode"
// An acceptable wal_level value
const (
@@ -297,6 +303,9 @@ type ConfigurationInfo struct {
// TemporaryTablespaces is the list of temporary tablespaces
TemporaryTablespaces []string
+
+ // IsWalArchivingDisabled is true when user requested to disable WAL archiving
+ IsWalArchivingDisabled bool
}
// ManagedExtension defines all the information about a managed extension
@@ -615,9 +624,14 @@ func CreatePostgresqlConfiguration(info ConfigurationInfo) *PgConfiguration {
}
// Apply the correct archive_mode
- if info.IsReplicaCluster {
+ switch {
+ case info.IsWalArchivingDisabled:
+ configuration.OverwriteConfig("archive_mode", "off")
+
+ case info.IsReplicaCluster:
configuration.OverwriteConfig("archive_mode", "always")
- } else {
+
+ default:
configuration.OverwriteConfig("archive_mode", "on")
}
diff --git a/pkg/reconciler/backup/volumesnapshot/offline.go b/pkg/reconciler/backup/volumesnapshot/offline.go
index deda07e52e..87ebc36fc2 100644
--- a/pkg/reconciler/backup/volumesnapshot/offline.go
+++ b/pkg/reconciler/backup/volumesnapshot/offline.go
@@ -44,7 +44,7 @@ func newOfflineExecutor(cli client.Client, recorder record.EventRecorder) *offli
return &offlineExecutor{cli: cli, recorder: recorder}
}
-func (o offlineExecutor) finalize(
+func (o *offlineExecutor) finalize(
ctx context.Context,
cluster *apiv1.Cluster,
backup *apiv1.Backup,
@@ -53,7 +53,7 @@ func (o offlineExecutor) finalize(
return nil, EnsurePodIsUnfenced(ctx, o.cli, o.recorder, cluster, backup, targetPod)
}
-func (o offlineExecutor) prepare(
+func (o *offlineExecutor) prepare(
ctx context.Context,
cluster *apiv1.Cluster,
backup *apiv1.Backup,
diff --git a/pkg/reconciler/backup/volumesnapshot/offline_test.go b/pkg/reconciler/backup/volumesnapshot/offline_test.go
new file mode 100644
index 0000000000..4f4c6edb0a
--- /dev/null
+++ b/pkg/reconciler/backup/volumesnapshot/offline_test.go
@@ -0,0 +1,141 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volumesnapshot
+
+import (
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/client-go/tools/record"
+ k8client "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
+ "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("offlineExecutor", func() {
+ var (
+ backup *apiv1.Backup
+ cluster *apiv1.Cluster
+ pod *corev1.Pod
+ oe *offlineExecutor
+ )
+
+ BeforeEach(func() {
+ pod = &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster-test-1",
+ Namespace: "default",
+ },
+ Status: corev1.PodStatus{
+ Conditions: []corev1.PodCondition{
+ {
+ Type: corev1.ContainersReady,
+ Status: corev1.ConditionTrue,
+ },
+ },
+ },
+ }
+
+ backup = &apiv1.Backup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "backup-test",
+ Namespace: "default",
+ },
+ }
+
+ cluster = &apiv1.Cluster{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "cluster-test",
+ Namespace: "default",
+ },
+ Spec: apiv1.ClusterSpec{},
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: pod.Name,
+ TargetPrimary: pod.Name,
+ },
+ }
+
+ oe = &offlineExecutor{
+ cli: fake.NewClientBuilder().
+ WithScheme(scheme.BuildWithAllKnownScheme()).
+ WithObjects(cluster, pod).
+ WithStatusSubresource(cluster, pod).
+ Build(),
+ recorder: record.NewFakeRecorder(100000),
+ }
+ })
+
+ It("ensurePodIsFenced should correctly fence the pod", func(ctx SpecContext) {
+ err := oe.ensurePodIsFenced(ctx, cluster, backup, pod.Name)
+ Expect(err).ToNot(HaveOccurred())
+
+ var patchedCluster apiv1.Cluster
+ err = oe.cli.Get(ctx, k8client.ObjectKeyFromObject(cluster), &patchedCluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ list, err := utils.GetFencedInstances(patchedCluster.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(list.ToList()).ToNot(BeEmpty())
+ Expect(list.Has(pod.Name)).To(BeTrue())
+ })
+
+ It("should ensure that waitForPodToBeFenced correctly evaluates pod conditions", func(ctx SpecContext) {
+ res, err := oe.waitForPodToBeFenced(ctx, pod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res.RequeueAfter).To(Equal(time.Second * 10))
+
+ pod.Status.Conditions[0].Status = corev1.ConditionFalse
+ err = oe.cli.Status().Update(ctx, pod)
+ Expect(err).ToNot(HaveOccurred())
+
+ res, err = oe.waitForPodToBeFenced(ctx, pod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeNil())
+ })
+
+ It("finalize should remove the fencing annotation from the cluster", func(ctx SpecContext) {
+ err := utils.AddFencedInstance(pod.Name, &cluster.ObjectMeta)
+ Expect(err).ToNot(HaveOccurred())
+
+ err = oe.cli.Update(ctx, cluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ list, err := utils.GetFencedInstances(cluster.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(list.ToList()).ToNot(BeEmpty())
+ Expect(list.Has(pod.Name)).To(BeTrue())
+
+ res, err := oe.finalize(ctx, cluster, backup, pod)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(res).To(BeNil())
+
+ var patchedCluster apiv1.Cluster
+ err = oe.cli.Get(ctx, k8client.ObjectKeyFromObject(cluster), &patchedCluster)
+ Expect(err).ToNot(HaveOccurred())
+
+ list, err = utils.GetFencedInstances(patchedCluster.Annotations)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(list.ToList()).To(BeEmpty())
+ })
+})
diff --git a/pkg/reconciler/hibernation/status.go b/pkg/reconciler/hibernation/status.go
index e8101e4bc0..8f16caa2a3 100644
--- a/pkg/reconciler/hibernation/status.go
+++ b/pkg/reconciler/hibernation/status.go
@@ -29,13 +29,11 @@ import (
)
const (
- // HibernationOff is the value of hibernation annotation when the hibernation
- // has been deactivated for the cluster
- HibernationOff = "off"
+ // HibernationOff is the shadow of utils.HibernationAnnotationValueOff, for compatibility
+ HibernationOff = string(utils.HibernationAnnotationValueOff)
- // HibernationOn is the value of hibernation annotation when the hibernation
- // has been requested for the cluster
- HibernationOn = "on"
+ // HibernationOn is the shadow of utils.HibernationAnnotationValueOn, for compatibility
+ HibernationOn = string(utils.HibernationAnnotationValueOn)
)
const (
@@ -79,18 +77,7 @@ func EnrichStatus(
cluster *apiv1.Cluster,
podList []corev1.Pod,
) {
- hibernationRequested, err := getHibernationAnnotationValue(cluster)
- if err != nil {
- meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{
- Type: HibernationConditionType,
- Status: metav1.ConditionFalse,
- Reason: HibernationConditionReasonWrongAnnotationValue,
- Message: err.Error(),
- })
- return
- }
-
- if !hibernationRequested {
+ if !isHibernationEnabled(cluster) {
meta.RemoveStatusCondition(&cluster.Status.Conditions, HibernationConditionType)
return
}
@@ -98,7 +85,8 @@ func EnrichStatus(
// We proceed to hibernate the cluster only when it is ready.
// Hibernating a non-ready cluster may be dangerous since the PVCs
// won't be completely created.
- if cluster.Status.Phase != apiv1.PhaseHealthy {
+ // We should stop the enrich status only when the cluster is unhealthy and the process hasn't already started
+ if cluster.Status.Phase != apiv1.PhaseHealthy && !isHibernationOngoing(cluster) {
return
}
@@ -132,20 +120,11 @@ func EnrichStatus(
})
}
-func getHibernationAnnotationValue(cluster *apiv1.Cluster) (bool, error) {
- value, ok := cluster.Annotations[utils.HibernationAnnotationName]
- if !ok {
- return false, nil
- }
-
- switch value {
- case HibernationOn:
- return true, nil
-
- case HibernationOff:
- return false, nil
+func isHibernationEnabled(cluster *apiv1.Cluster) bool {
+ return cluster.Annotations[utils.HibernationAnnotationName] == HibernationOn
+}
- default:
- return false, &ErrInvalidHibernationValue{value: value}
- }
+// isHibernationOngoing check if the cluster is doing the hibernation process
+func isHibernationOngoing(cluster *apiv1.Cluster) bool {
+ return meta.FindStatusCondition(cluster.Status.Conditions, HibernationConditionType) != nil
}
diff --git a/pkg/reconciler/hibernation/status_test.go b/pkg/reconciler/hibernation/status_test.go
index a1c2b99832..ba0012451f 100644
--- a/pkg/reconciler/hibernation/status_test.go
+++ b/pkg/reconciler/hibernation/status_test.go
@@ -31,7 +31,7 @@ import (
var _ = Describe("Hibernation annotation management", func() {
It("classifies clusters with no annotation as not hibernated", func() {
cluster := apiv1.Cluster{}
- Expect(getHibernationAnnotationValue(&cluster)).To(BeFalse())
+ Expect(isHibernationEnabled(&cluster)).To(BeFalse())
})
It("correctly handles on/off values", func() {
@@ -42,22 +42,10 @@ var _ = Describe("Hibernation annotation management", func() {
},
},
}
- Expect(getHibernationAnnotationValue(&cluster)).To(BeTrue())
+ Expect(isHibernationEnabled(&cluster)).To(BeTrue())
cluster.ObjectMeta.Annotations[utils.HibernationAnnotationName] = HibernationOff
- Expect(getHibernationAnnotationValue(&cluster)).To(BeFalse())
- })
-
- It("fails when the value of the annotation is not correct", func() {
- cluster := apiv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- utils.HibernationAnnotationName: "not-correct",
- },
- },
- }
- _, err := getHibernationAnnotationValue(&cluster)
- Expect(err).ToNot(Succeed())
+ Expect(isHibernationEnabled(&cluster)).To(BeFalse())
})
})
@@ -68,25 +56,6 @@ var _ = Describe("Status enrichment", func() {
Expect(cluster.Status.Conditions).To(BeEmpty())
})
- It("adds an error condition when the hibernation annotation has a wrong value", func(ctx SpecContext) {
- cluster := apiv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{
- Annotations: map[string]string{
- utils.HibernationAnnotationName: "not-correct",
- },
- },
- Status: apiv1.ClusterStatus{
- Phase: apiv1.PhaseHealthy,
- },
- }
- EnrichStatus(ctx, &cluster, nil)
-
- hibernationCondition := meta.FindStatusCondition(cluster.Status.Conditions, HibernationConditionType)
- Expect(hibernationCondition).ToNot(BeNil())
- Expect(hibernationCondition.Status).To(Equal(metav1.ConditionFalse))
- Expect(hibernationCondition.Reason).To(Equal(HibernationConditionReasonWrongAnnotationValue))
- })
-
It("removes the hibernation condition when hibernation is turned off", func(ctx SpecContext) {
cluster := apiv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/reconciler/instance/metadata.go b/pkg/reconciler/instance/metadata.go
index f5faa02e4d..aa3caed421 100644
--- a/pkg/reconciler/instance/metadata.go
+++ b/pkg/reconciler/instance/metadata.go
@@ -35,13 +35,13 @@ func ReconcileMetadata(
ctx context.Context,
cli client.Client,
cluster *apiv1.Cluster,
- instances corev1.PodList,
+ instances []corev1.Pod,
) error {
contextLogger := log.FromContext(ctx)
- for idx := range instances.Items {
- origInstance := instances.Items[idx].DeepCopy()
- instance := &instances.Items[idx]
+ for idx := range instances {
+ origInstance := instances[idx].DeepCopy()
+ instance := &instances[idx]
// Update the labels for the -rw service to work correctly
modified := updateRoleLabels(ctx, cluster, instance)
diff --git a/pkg/reconciler/instance/metadata_test.go b/pkg/reconciler/instance/metadata_test.go
index 606847c969..49cd9617b0 100644
--- a/pkg/reconciler/instance/metadata_test.go
+++ b/pkg/reconciler/instance/metadata_test.go
@@ -460,11 +460,9 @@ var _ = Describe("object metadata test", func() {
var _ = Describe("metadata reconciliation test", func() {
Context("ReconcileMetadata", func() {
It("Should update all pods metadata successfully", func() {
- instanceList := corev1.PodList{
- Items: []corev1.Pod{
- {ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
- {ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
- },
+ instances := []corev1.Pod{
+ {ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
}
cluster := &apiv1.Cluster{
@@ -481,16 +479,16 @@ var _ = Describe("metadata reconciliation test", func() {
cli := fake.NewClientBuilder().
WithScheme(scheme.BuildWithAllKnownScheme()).
- WithObjects(&instanceList.Items[0], &instanceList.Items[1]).
+ WithObjects(&instances[0], &instances[1]).
Build()
- err := ReconcileMetadata(context.Background(), cli, cluster, instanceList)
+ err := ReconcileMetadata(context.Background(), cli, cluster, instances)
Expect(err).ToNot(HaveOccurred())
var updatedInstanceList corev1.PodList
err = cli.List(context.Background(), &updatedInstanceList)
Expect(err).ToNot(HaveOccurred())
- Expect(updatedInstanceList.Items).To(HaveLen(len(instanceList.Items)))
+ Expect(updatedInstanceList.Items).To(HaveLen(len(instances)))
for _, pod := range updatedInstanceList.Items {
Expect(pod.Labels[utils.PodRoleLabelName]).To(Equal(string(utils.PodRoleInstance)))
diff --git a/pkg/reconciler/persistentvolumeclaim/instance.go b/pkg/reconciler/persistentvolumeclaim/instance.go
index e20c0a4638..362f5578b6 100644
--- a/pkg/reconciler/persistentvolumeclaim/instance.go
+++ b/pkg/reconciler/persistentvolumeclaim/instance.go
@@ -81,6 +81,7 @@ func reconcileSingleInstanceMissingPVCs(
var shouldReconcile bool
instanceName := specs.GetInstanceName(cluster.Name, serial)
for _, expectedPVC := range getExpectedPVCsFromCluster(cluster, instanceName) {
+ // Continue if the expectedPVC is present in the current PVC list
if slices.ContainsFunc(pvcs, func(pvc corev1.PersistentVolumeClaim) bool { return expectedPVC.name == pvc.Name }) {
continue
}
diff --git a/pkg/reconciler/persistentvolumeclaim/metadata.go b/pkg/reconciler/persistentvolumeclaim/metadata.go
index ce86483629..a0e6678914 100644
--- a/pkg/reconciler/persistentvolumeclaim/metadata.go
+++ b/pkg/reconciler/persistentvolumeclaim/metadata.go
@@ -68,66 +68,56 @@ func (m metadataReconciler) reconcile(
return nil
}
-// reconcileMetadataComingFromInstance ensures that the PVCs have the correct metadata that is inherited by the instance
-func reconcileMetadataComingFromInstance(
+// reconcileInstanceRoleLabel ensures that the PVCs have the correct instance role label attached to them
+func reconcileInstanceRoleLabel(
ctx context.Context,
c client.Client,
cluster *apiv1.Cluster,
- runningInstances []corev1.Pod,
pvcs []corev1.PersistentVolumeClaim,
) error {
- for _, pod := range runningInstances {
- podRole, podHasRole := utils.GetInstanceRole(pod.ObjectMeta.Labels)
- podSerial, podSerialErr := specs.GetNodeSerial(pod.ObjectMeta)
- if podSerialErr != nil {
- return podSerialErr
+ if cluster.Status.CurrentPrimary == "" {
+ return nil
+ }
+ for _, instanceName := range cluster.Status.InstanceNames {
+ instanceRole := specs.ClusterRoleLabelReplica
+ if instanceName == cluster.Status.CurrentPrimary {
+ instanceRole = specs.ClusterRoleLabelPrimary
}
instanceReconciler := metadataReconciler{
- name: "instance-inheritance",
+ name: "instance-role",
isUpToDate: func(pvc *corev1.PersistentVolumeClaim) bool {
- if podHasRole && pvc.ObjectMeta.Labels[utils.ClusterRoleLabelName] != podRole {
+ if pvc.ObjectMeta.Labels[utils.ClusterRoleLabelName] != instanceRole {
return false
}
- if podHasRole && pvc.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] != podRole {
- return false
- }
-
- if serial, err := specs.GetNodeSerial(pvc.ObjectMeta); err != nil || serial != podSerial {
+ if pvc.ObjectMeta.Labels[utils.ClusterInstanceRoleLabelName] != instanceRole {
return false
}
return true
},
update: func(pvc *corev1.PersistentVolumeClaim) {
- utils.SetInstanceRole(pvc.ObjectMeta, podRole)
-
- if pvc.Annotations == nil {
- pvc.Annotations = map[string]string{}
- }
-
- pvc.Annotations[utils.ClusterSerialAnnotationName] = strconv.Itoa(podSerial)
+ utils.SetInstanceRole(pvc.ObjectMeta, instanceRole)
},
}
// todo: this should not rely on expected cluster instance pvc but should fetch every possible pvc name
- instancePVCs := filterByInstanceExpectedPVCs(cluster, pod.Name, pvcs)
+ instancePVCs := filterByInstanceExpectedPVCs(cluster, instanceName, pvcs)
if err := instanceReconciler.reconcile(ctx, c, instancePVCs); err != nil {
return err
}
}
-
return nil
}
-func reconcileMetadata(
+// ReconcileMetadata ensures that the PVC metadata is kept up to date
+func ReconcileMetadata(
ctx context.Context,
c client.Client,
cluster *apiv1.Cluster,
- runningInstances []corev1.Pod,
pvcs []corev1.PersistentVolumeClaim,
) error {
- if err := reconcileMetadataComingFromInstance(ctx, c, cluster, runningInstances, pvcs); err != nil {
+ if err := reconcileInstanceRoleLabel(ctx, c, cluster, pvcs); err != nil {
return fmt.Errorf("cannot update role labels on pvcs: %w", err)
}
@@ -142,6 +132,47 @@ func reconcileMetadata(
return nil
}
+// ReconcileSerialAnnotation ensures that all the PVCs have the correct serial annotation
+func ReconcileSerialAnnotation(
+ ctx context.Context,
+ c client.Client,
+ cluster *apiv1.Cluster,
+ runningInstances []corev1.Pod,
+ pvcs []corev1.PersistentVolumeClaim,
+) error {
+ for _, pod := range runningInstances {
+ podSerial, podSerialErr := specs.GetNodeSerial(pod.ObjectMeta)
+ if podSerialErr != nil {
+ return podSerialErr
+ }
+
+ instanceReconciler := metadataReconciler{
+ name: "serial",
+ isUpToDate: func(pvc *corev1.PersistentVolumeClaim) bool {
+ if serial, err := specs.GetNodeSerial(pvc.ObjectMeta); err != nil || serial != podSerial {
+ return false
+ }
+
+ return true
+ },
+ update: func(pvc *corev1.PersistentVolumeClaim) {
+ if pvc.Annotations == nil {
+ pvc.Annotations = map[string]string{}
+ }
+
+ pvc.Annotations[utils.ClusterSerialAnnotationName] = strconv.Itoa(podSerial)
+ },
+ }
+
+ // todo: this should not rely on expected cluster instance pvc but should fetch every possible pvc name
+ instancePVCs := filterByInstanceExpectedPVCs(cluster, pod.Name, pvcs)
+ if err := instanceReconciler.reconcile(ctx, c, instancePVCs); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func newAnnotationReconciler(cluster *apiv1.Cluster) metadataReconciler {
return metadataReconciler{
name: "annotations",
diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler.go b/pkg/reconciler/persistentvolumeclaim/reconciler.go
index 0429bd3415..ea0f3b3c4f 100644
--- a/pkg/reconciler/persistentvolumeclaim/reconciler.go
+++ b/pkg/reconciler/persistentvolumeclaim/reconciler.go
@@ -38,10 +38,6 @@ func Reconcile(
) (ctrl.Result, error) {
contextLogger := log.FromContext(ctx)
- if err := reconcileMetadata(ctx, c, cluster, instances, pvcs); err != nil {
- return ctrl.Result{}, err
- }
-
if res, err := reconcileMultipleInstancesMissingPVCs(ctx, c, cluster, instances, pvcs); !res.IsZero() || err != nil {
return res, err
}
diff --git a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go
index 3bc3624587..4ca794faa8 100644
--- a/pkg/reconciler/persistentvolumeclaim/reconciler_test.go
+++ b/pkg/reconciler/persistentvolumeclaim/reconciler_test.go
@@ -38,16 +38,16 @@ import (
. "github.com/onsi/gomega"
)
-var _ = Describe("Reconcile Resources", func() {
+var _ = Describe("Reconcile Metadata", func() {
It("Reconcile existing resources shouldn't fail and "+
"it should make sure to add the new instanceRole label to existing PVC", func() {
clusterName := "Cluster-pvc-resources"
pvcs := corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
- makePVC(clusterName, "1", NewPgDataCalculator(), false),
- makePVC(clusterName, "2", NewPgWalCalculator(), false), // role is out of sync with name
- makePVC(clusterName, "3-wal", NewPgDataCalculator(), false), // role is out of sync with name
- makePVC(clusterName, "3", NewPgDataCalculator(), false),
+ makePVC(clusterName, "1", "1", NewPgDataCalculator(), false),
+ makePVC(clusterName, "2", "2", NewPgWalCalculator(), false), // role is out of sync with name
+ makePVC(clusterName, "3-wal", "3", NewPgDataCalculator(), false), // role is out of sync with name
+ makePVC(clusterName, "3", "3", NewPgDataCalculator(), false),
},
}
cluster := &apiv1.Cluster{
@@ -68,6 +68,10 @@ var _ = Describe("Reconcile Resources", func() {
Size: "1Gi",
},
},
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: clusterName + "-1",
+ InstanceNames: []string{clusterName + "-1", clusterName + "-2", clusterName + "-3"},
+ },
}
pods := corev1.PodList{
@@ -127,11 +131,10 @@ var _ = Describe("Reconcile Resources", func() {
configuration.Current.InheritedAnnotations = []string{"annotation1"}
configuration.Current.InheritedLabels = []string{"label1"}
- _, err := Reconcile(
+ err := ReconcileMetadata(
context.Background(),
cli,
cluster,
- pods.Items,
pvcs.Items,
)
Expect(err).ToNot(HaveOccurred())
@@ -196,10 +199,10 @@ var _ = Describe("PVC reconciliation", func() {
It("Will reconcile each PVC's with the correct labels", func() {
pvcs := corev1.PersistentVolumeClaimList{
Items: []corev1.PersistentVolumeClaim{
- makePVC(clusterName, "1", NewPgDataCalculator(), false),
- makePVC(clusterName, "2", NewPgWalCalculator(), false), // role is out of sync with name
- makePVC(clusterName, "3-wal", NewPgDataCalculator(), false), // role is out of sync with name
- makePVC(clusterName, "3", NewPgDataCalculator(), false),
+ makePVC(clusterName, "1", "1", NewPgDataCalculator(), false),
+ makePVC(clusterName, "2", "2", NewPgWalCalculator(), false), // role is out of sync with name
+ makePVC(clusterName, "3-wal", "3", NewPgDataCalculator(), false), // role is out of sync with name
+ makePVC(clusterName, "3", "3", NewPgDataCalculator(), false),
},
}
cluster := &apiv1.Cluster{
@@ -253,7 +256,7 @@ var _ = Describe("PVC reconciliation", func() {
Expect(err).ToNot(HaveOccurred())
Expect(pvcs.Items[2].Annotations).To(BeEquivalentTo(map[string]string{
utils.PVCStatusAnnotationName: "ready",
- utils.ClusterSerialAnnotationName: "3-wal",
+ utils.ClusterSerialAnnotationName: "3",
"annotation1": "value",
"annotation2": "value",
}))
@@ -281,10 +284,10 @@ var _ = Describe("PVC reconciliation", func() {
},
}
- pvc := makePVC(clusterName, "1", NewPgDataCalculator(), false)
- pvc2 := makePVC(clusterName, "2", NewPgWalCalculator(), false) // role is out of sync with name
- pvc3Wal := makePVC(clusterName, "3-wal", NewPgDataCalculator(), false) // role is out of sync with name
- pvc3Data := makePVC(clusterName, "3", nil, false)
+ pvc := makePVC(clusterName, "1", "1", NewPgDataCalculator(), false)
+ pvc2 := makePVC(clusterName, "2", "2", NewPgWalCalculator(), false) // role is out of sync with name
+ pvc3Wal := makePVC(clusterName, "3-wal", "3", NewPgDataCalculator(), false) // role is out of sync with name
+ pvc3Data := makePVC(clusterName, "3", "3", nil, false)
pvcs := []corev1.PersistentVolumeClaim{
pvc,
pvc2,
@@ -332,8 +335,12 @@ var _ = Describe("PVC reconciliation", func() {
It("will reconcile each PVC's instance-relative labels by invoking the instance metadata reconciler", func() {
cluster := &apiv1.Cluster{
- ObjectMeta: metav1.ObjectMeta{Name: "test-name", Namespace: "test-namespace"},
+ ObjectMeta: metav1.ObjectMeta{Name: clusterName, Namespace: "test-namespace"},
Spec: apiv1.ClusterSpec{WalStorage: &apiv1.StorageConfiguration{Size: "1Gi"}},
+ Status: apiv1.ClusterStatus{
+ CurrentPrimary: clusterName + "-1",
+ InstanceNames: []string{clusterName + "-1", clusterName + "-2", clusterName + "-3"},
+ },
}
pods := []corev1.Pod{
@@ -342,10 +349,10 @@ var _ = Describe("PVC reconciliation", func() {
makePod(clusterName, "3", specs.ClusterRoleLabelReplica),
}
- pvc := makePVC(clusterName, "1", NewPgDataCalculator(), false)
- pvc2 := makePVC(clusterName, "2", NewPgDataCalculator(), false)
- pvc3Wal := makePVC(clusterName, "3-wal", NewPgWalCalculator(), false)
- pvc3Data := makePVC(clusterName, "3", NewPgDataCalculator(), false)
+ pvc := makePVC(clusterName, "1", "0", NewPgDataCalculator(), false)
+ pvc2 := makePVC(clusterName, "2", "0", NewPgDataCalculator(), false)
+ pvc3Wal := makePVC(clusterName, "3-wal", "0", NewPgWalCalculator(), false)
+ pvc3Data := makePVC(clusterName, "3", "0", NewPgDataCalculator(), false)
pvcs := []corev1.PersistentVolumeClaim{
pvc,
pvc2,
@@ -357,7 +364,14 @@ var _ = Describe("PVC reconciliation", func() {
WithObjects(&pvc, &pvc2, &pvc3Wal, &pvc3Data).
Build()
- err := reconcileMetadataComingFromInstance(
+ err := ReconcileMetadata(
+ context.Background(),
+ cl,
+ cluster,
+ pvcs)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = ReconcileSerialAnnotation(
context.Background(),
cl,
cluster,
@@ -372,6 +386,10 @@ var _ = Describe("PVC reconciliation", func() {
utils.ClusterRoleLabelName: "primary",
utils.ClusterInstanceRoleLabelName: "primary",
}))
+ Expect(patchedPvc.Annotations).To(Equal(map[string]string{
+ utils.ClusterSerialAnnotationName: "1",
+ utils.PVCStatusAnnotationName: "ready",
+ }))
patchedPvc2 := fetchPVC(cl, pvc2)
Expect(patchedPvc2.Labels).To(Equal(map[string]string{
@@ -380,14 +398,22 @@ var _ = Describe("PVC reconciliation", func() {
utils.ClusterRoleLabelName: "replica",
utils.ClusterInstanceRoleLabelName: "replica",
}))
+ Expect(patchedPvc2.Annotations).To(Equal(map[string]string{
+ utils.ClusterSerialAnnotationName: "2",
+ utils.PVCStatusAnnotationName: "ready",
+ }))
patchedPvc3Wal := fetchPVC(cl, pvc3Wal)
Expect(patchedPvc3Wal.Labels).To(Equal(map[string]string{
- utils.InstanceNameLabelName: clusterName + "-3-wal",
+ utils.InstanceNameLabelName: clusterName + "-3",
utils.PvcRoleLabelName: "PG_WAL",
utils.ClusterRoleLabelName: "replica",
utils.ClusterInstanceRoleLabelName: "replica",
}))
+ Expect(patchedPvc3Wal.Annotations).To(Equal(map[string]string{
+ utils.ClusterSerialAnnotationName: "3",
+ utils.PVCStatusAnnotationName: "ready",
+ }))
patchedPvc3Data := fetchPVC(cl, pvc3Data)
Expect(patchedPvc3Data.Labels).To(Equal(map[string]string{
@@ -396,6 +422,10 @@ var _ = Describe("PVC reconciliation", func() {
utils.ClusterRoleLabelName: "replica",
utils.ClusterInstanceRoleLabelName: "replica",
}))
+ Expect(patchedPvc3Data.Annotations).To(Equal(map[string]string{
+ utils.ClusterSerialAnnotationName: "3",
+ utils.PVCStatusAnnotationName: "ready",
+ }))
})
})
@@ -433,9 +463,9 @@ var _ = Describe("Reconcile PVC Quantity", func() {
Name: clusterName,
},
}
- pvc = makePVC(clusterName, "1", NewPgDataCalculator(), false)
+ pvc = makePVC(clusterName, "1", "1", NewPgDataCalculator(), false)
tbsName := "fragglerock"
- pvc2 = makePVC(clusterName, "2", NewPgTablespaceCalculator(tbsName), false)
+ pvc2 = makePVC(clusterName, "2", "2", NewPgTablespaceCalculator(tbsName), false)
pvc2.Spec.Resources.Requests = map[corev1.ResourceName]resource.Quantity{
"storage": resource.MustParse("3Gi"),
}
diff --git a/pkg/reconciler/persistentvolumeclaim/resources_test.go b/pkg/reconciler/persistentvolumeclaim/resources_test.go
index 6606825733..bbccc10553 100644
--- a/pkg/reconciler/persistentvolumeclaim/resources_test.go
+++ b/pkg/reconciler/persistentvolumeclaim/resources_test.go
@@ -34,7 +34,7 @@ var _ = Describe("PVC detection", func() {
It("will list PVCs with Jobs or Pods or which are Ready", func() {
clusterName := "myCluster"
makeClusterPVC := func(serial string, isResizing bool) corev1.PersistentVolumeClaim {
- return makePVC(clusterName, serial, NewPgDataCalculator(), isResizing)
+ return makePVC(clusterName, serial, serial, NewPgDataCalculator(), isResizing)
}
pvcs := []corev1.PersistentVolumeClaim{
makeClusterPVC("1", false), // has a Pod
diff --git a/pkg/reconciler/persistentvolumeclaim/storagesource.go b/pkg/reconciler/persistentvolumeclaim/storagesource.go
index b77cc8f4d1..9f1dcbfcac 100644
--- a/pkg/reconciler/persistentvolumeclaim/storagesource.go
+++ b/pkg/reconciler/persistentvolumeclaim/storagesource.go
@@ -169,15 +169,9 @@ func getCandidateSourceFromBackup(backup *apiv1.Backup) *StorageSource {
// from a Cluster definition, taking into consideration the backup that the
// cluster has been bootstrapped from
func getCandidateSourceFromClusterDefinition(cluster *apiv1.Cluster) *StorageSource {
- if cluster.Spec.Bootstrap == nil {
- return nil
- }
-
- if cluster.Spec.Bootstrap.Recovery == nil {
- return nil
- }
-
- if cluster.Spec.Bootstrap.Recovery.VolumeSnapshots == nil {
+ if cluster.Spec.Bootstrap == nil ||
+ cluster.Spec.Bootstrap.Recovery == nil ||
+ cluster.Spec.Bootstrap.Recovery.VolumeSnapshots == nil {
return nil
}
diff --git a/pkg/reconciler/persistentvolumeclaim/suite_test.go b/pkg/reconciler/persistentvolumeclaim/suite_test.go
index bd0d39988a..3698c99e66 100644
--- a/pkg/reconciler/persistentvolumeclaim/suite_test.go
+++ b/pkg/reconciler/persistentvolumeclaim/suite_test.go
@@ -34,9 +34,15 @@ func TestSpecs(t *testing.T) {
RunSpecs(t, "PersistentVolumeClaim reconciler")
}
-func makePVC(clusterName string, suffix string, meta Meta, isResizing bool) corev1.PersistentVolumeClaim {
+func makePVC(
+ clusterName string,
+ suffix string,
+ serial string,
+ meta Meta,
+ isResizing bool,
+) corev1.PersistentVolumeClaim {
annotations := map[string]string{
- utils.ClusterSerialAnnotationName: suffix,
+ utils.ClusterSerialAnnotationName: serial,
utils.PVCStatusAnnotationName: StatusReady,
}
diff --git a/pkg/resources/retry.go b/pkg/resources/retry.go
index bc1e214387..4a0d13aa1b 100644
--- a/pkg/resources/retry.go
+++ b/pkg/resources/retry.go
@@ -16,5 +16,28 @@ limitations under the License.
package resources
+import (
+ "context"
+
+ "k8s.io/client-go/util/retry"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
// RetryAlways is a function that always returns true on any error encountered
func RetryAlways(_ error) bool { return true }
+
+// RetryWithRefreshedResource updates the resource before invoking the cb
+func RetryWithRefreshedResource(
+ ctx context.Context,
+ cli client.Client,
+ resource client.Object,
+ cb func() error,
+) error {
+ return retry.OnError(retry.DefaultBackoff, RetryAlways, func() error {
+ if err := cli.Get(ctx, client.ObjectKeyFromObject(resource), resource); err != nil {
+ return err
+ }
+
+ return cb()
+ })
+}
diff --git a/pkg/resources/retry_test.go b/pkg/resources/retry_test.go
new file mode 100644
index 0000000000..04ae25cb90
--- /dev/null
+++ b/pkg/resources/retry_test.go
@@ -0,0 +1,97 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "context"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ schemeBuilder "github.com/cloudnative-pg/cloudnative-pg/internal/scheme"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+var _ = Describe("RetryWithRefreshedResource", func() {
+ const (
+ name = "test-deployment"
+ namespace = "default"
+ )
+
+ var (
+ fakeClient client.Client
+ testResource *appsv1.Deployment
+ ctx context.Context
+ )
+
+ BeforeEach(func() {
+ ctx = context.TODO()
+ fakeClient = fake.NewClientBuilder().WithScheme(schemeBuilder.BuildWithAllKnownScheme()).Build()
+ testResource = &appsv1.Deployment{
+ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
+ Spec: appsv1.DeploymentSpec{
+ Replicas: ptr.To(int32(1)),
+ Selector: &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ },
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "test"}},
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "test-container",
+ Image: "nginx",
+ },
+ },
+ },
+ },
+ },
+ }
+ })
+
+ Context("when client.Get succeeds", func() {
+ BeforeEach(func() {
+ // Set up the fake client to return the resource without error
+ Expect(fakeClient.Create(ctx, testResource)).To(Succeed())
+
+ modified := testResource.DeepCopy()
+ modified.Spec.Replicas = ptr.To(int32(10))
+ err := fakeClient.Update(ctx, modified)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ It("should invoke the callback without error and update the resource", func() {
+ // ensure that the local deployment contains the old value
+ Expect(*testResource.Spec.Replicas).To(Equal(int32(1)))
+
+ cb := func() error {
+ return nil
+ }
+
+ // ensure that now the deployment contains the new value
+ err := RetryWithRefreshedResource(ctx, fakeClient, testResource, cb)
+ Expect(err).ToNot(HaveOccurred())
+ Expect(*testResource.Spec.Replicas).To(Equal(int32(10)))
+ })
+ })
+})
diff --git a/pkg/resources/suite_test.go b/pkg/resources/suite_test.go
new file mode 100644
index 0000000000..9accbbf2ed
--- /dev/null
+++ b/pkg/resources/suite_test.go
@@ -0,0 +1,29 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resources
+
+import (
+ "testing"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+)
+
+func TestResources(t *testing.T) {
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "Resources Suite")
+}
diff --git a/pkg/specs/jobs.go b/pkg/specs/jobs.go
index 0c73aae077..5cecaaa981 100644
--- a/pkg/specs/jobs.go
+++ b/pkg/specs/jobs.go
@@ -334,7 +334,7 @@ func createPrimaryJob(cluster apiv1.Cluster, nodeSerial int, role jobRole, initC
SecurityContext: CreateContainerSecurityContext(cluster.GetSeccompProfile()),
},
},
- Volumes: createPostgresVolumes(cluster, instanceName),
+ Volumes: createPostgresVolumes(&cluster, instanceName),
SecurityContext: CreatePodSecurityContext(
cluster.GetSeccompProfile(),
cluster.GetPostgresUID(),
diff --git a/pkg/specs/pgbouncer/deployments.go b/pkg/specs/pgbouncer/deployments.go
index 1ede06a049..515c368b14 100644
--- a/pkg/specs/pgbouncer/deployments.go
+++ b/pkg/specs/pgbouncer/deployments.go
@@ -37,7 +37,7 @@ import (
const (
// DefaultPgbouncerImage is the name of the pgbouncer image used by default
- DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.22.0"
+ DefaultPgbouncerImage = "ghcr.io/cloudnative-pg/pgbouncer:1.22.1"
)
// Deployment create the deployment of pgbouncer, given
diff --git a/pkg/specs/pods.go b/pkg/specs/pods.go
index e4cc1b595c..6e68b705ff 100644
--- a/pkg/specs/pods.go
+++ b/pkg/specs/pods.go
@@ -167,7 +167,7 @@ func CreateClusterPodSpec(
},
SchedulerName: cluster.Spec.SchedulerName,
Containers: createPostgresContainers(cluster, envConfig),
- Volumes: createPostgresVolumes(cluster, podName),
+ Volumes: createPostgresVolumes(&cluster, podName),
SecurityContext: CreatePodSecurityContext(
cluster.GetSeccompProfile(),
cluster.GetPostgresUID(),
diff --git a/pkg/specs/volumes.go b/pkg/specs/volumes.go
index 52d5a1617e..e397e8ea5b 100644
--- a/pkg/specs/volumes.go
+++ b/pkg/specs/volumes.go
@@ -88,7 +88,7 @@ func SnapshotBackupNameForTablespace(backupName, tablespaceName string) string {
return backupName + apiv1.TablespaceVolumeInfix + convertPostgresIDToK8sName(tablespaceName)
}
-func createPostgresVolumes(cluster apiv1.Cluster, podName string) []corev1.Volume {
+func createPostgresVolumes(cluster *apiv1.Cluster, podName string) []corev1.Volume {
result := []corev1.Volume{
{
Name: "pgdata",
@@ -293,7 +293,7 @@ func createPostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount {
// we should create volumeMounts in fixed sequence as podSpec will store it in annotation and
// later it will be retrieved to do deepEquals
if cluster.ContainsTablespaces() {
- tbsNames := getSortedTablespaceList(cluster)
+ tbsNames := getSortedTablespaceList(&cluster)
for i := range tbsNames {
volumeMounts = append(volumeMounts,
corev1.VolumeMount{
@@ -306,7 +306,7 @@ func createPostgresVolumeMounts(cluster apiv1.Cluster) []corev1.VolumeMount {
return volumeMounts
}
-func getSortedTablespaceList(cluster apiv1.Cluster) []string {
+func getSortedTablespaceList(cluster *apiv1.Cluster) []string {
// Try to get a fix order of name
tbsNames := make([]string, len(cluster.Spec.Tablespaces))
i := 0
@@ -318,7 +318,7 @@ func getSortedTablespaceList(cluster apiv1.Cluster) []string {
return tbsNames
}
-func createEphemeralVolume(cluster apiv1.Cluster) corev1.Volume {
+func createEphemeralVolume(cluster *apiv1.Cluster) corev1.Volume {
scratchVolumeSource := corev1.VolumeSource{}
if cluster.Spec.EphemeralVolumeSource != nil {
scratchVolumeSource.Ephemeral = cluster.Spec.EphemeralVolumeSource
@@ -333,7 +333,7 @@ func createEphemeralVolume(cluster apiv1.Cluster) corev1.Volume {
}
}
-func createProjectedVolume(cluster apiv1.Cluster) corev1.Volume {
+func createProjectedVolume(cluster *apiv1.Cluster) corev1.Volume {
return corev1.Volume{
Name: "projected",
VolumeSource: corev1.VolumeSource{
diff --git a/pkg/specs/volumes_test.go b/pkg/specs/volumes_test.go
index 3f0765cb1c..a6e2b75287 100644
--- a/pkg/specs/volumes_test.go
+++ b/pkg/specs/volumes_test.go
@@ -386,7 +386,7 @@ var _ = DescribeTable("test creation of volume mounts",
var _ = DescribeTable("test creation of volumes",
func(cluster apiv1.Cluster, volumes []corev1.Volume) {
- vols := createPostgresVolumes(cluster, "pod-1")
+ vols := createPostgresVolumes(&cluster, "pod-1")
Expect(vols).NotTo(BeEmpty())
for _, v := range volumes {
Expect(vols).To(ContainElement(v))
@@ -483,7 +483,7 @@ var _ = Describe("createEphemeralVolume", func() {
})
It("should create an emptyDir volume by default", func() {
- ephemeralVolume := createEphemeralVolume(cluster)
+ ephemeralVolume := createEphemeralVolume(&cluster)
Expect(ephemeralVolume.Name).To(Equal("scratch-data"))
Expect(ephemeralVolume.VolumeSource.EmptyDir).NotTo(BeNil())
})
@@ -498,7 +498,7 @@ var _ = Describe("createEphemeralVolume", func() {
},
}
- ephemeralVolume := createEphemeralVolume(cluster)
+ ephemeralVolume := createEphemeralVolume(&cluster)
Expect(ephemeralVolume.Name).To(Equal("scratch-data"))
Expect(ephemeralVolume.EmptyDir).To(BeNil())
@@ -512,7 +512,7 @@ var _ = Describe("createEphemeralVolume", func() {
TemporaryData: &quantity,
}
- ephemeralVolume := createEphemeralVolume(cluster)
+ ephemeralVolume := createEphemeralVolume(&cluster)
Expect(ephemeralVolume.Name).To(Equal("scratch-data"))
Expect(*ephemeralVolume.VolumeSource.EmptyDir.SizeLimit).To(Equal(quantity))
diff --git a/pkg/utils/context.go b/pkg/utils/context.go
new file mode 100644
index 0000000000..49fe06543a
--- /dev/null
+++ b/pkg/utils/context.go
@@ -0,0 +1,23 @@
+/*
+Copyright The CloudNativePG Contributors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utils
+
+// contextKey is a type used to assign values inside the context
+type contextKey string
+
+// ContextKeyCluster is the context key holding cluster data
+const ContextKeyCluster contextKey = "cluster"
diff --git a/pkg/utils/labels_annotations.go b/pkg/utils/labels_annotations.go
index 000d65cb41..36e3a4dc9c 100644
--- a/pkg/utils/labels_annotations.go
+++ b/pkg/utils/labels_annotations.go
@@ -131,6 +131,9 @@ const (
// PgControldataAnnotationName is the name of the annotation containing the pg_controldata output of the cluster
PgControldataAnnotationName = MetadataNamespace + "/pgControldata"
+ // SkipWalArchiving is the name of the annotation which turns off WAL archiving
+ SkipWalArchiving = MetadataNamespace + "/skipWalArchiving"
+
// skipEmptyWalArchiveCheck is the name of the annotation which turns off the checks that ensure that the WAL
// archive is empty before writing data
skipEmptyWalArchiveCheck = MetadataNamespace + "/skipEmptyWalArchiveCheck"
@@ -230,6 +233,19 @@ const (
PVCRolePgTablespace PVCRole = "PG_TABLESPACE"
)
+// HibernationAnnotationValue describes the status of the hibernation
+type HibernationAnnotationValue string
+
+const (
+ // HibernationAnnotationValueOff is the value of hibernation annotation when the hibernation
+ // has been deactivated for the cluster
+ HibernationAnnotationValueOff HibernationAnnotationValue = "off"
+
+ // HibernationAnnotationValueOn is the value of hibernation annotation when the hibernation
+ // has been requested for the cluster
+ HibernationAnnotationValueOn HibernationAnnotationValue = "on"
+)
+
// LabelClusterName labels the object with the cluster name
func LabelClusterName(object *metav1.ObjectMeta, name string) {
if object.Labels == nil {
@@ -376,6 +392,12 @@ func IsEmptyWalArchiveCheckEnabled(object *metav1.ObjectMeta) bool {
return object.Annotations[skipEmptyWalArchiveCheck] != string(annotationStatusEnabled)
}
+// IsWalArchivingDisabled returns a boolean indicating if PostgreSQL should not
+// archive WAL files
+func IsWalArchivingDisabled(object *metav1.ObjectMeta) bool {
+ return object.Annotations[SkipWalArchiving] == string(annotationStatusEnabled)
+}
+
func mergeMap(receiver, giver map[string]string) map[string]string {
for key, value := range giver {
receiver[key] = value
diff --git a/pkg/versions/versions.go b/pkg/versions/versions.go
index 6fe54a5b02..28ead4ee79 100644
--- a/pkg/versions/versions.go
+++ b/pkg/versions/versions.go
@@ -20,13 +20,13 @@ package versions
const (
// Version is the version of the operator
- Version = "1.22.1"
+ Version = "1.22.2"
// DefaultImageName is the default image used by the operator to create pods
DefaultImageName = "ghcr.io/cloudnative-pg/postgresql:16.2"
// DefaultOperatorImageName is the default operator image used by the controller in the pods running PostgreSQL
- DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.22.1"
+ DefaultOperatorImageName = "ghcr.io/cloudnative-pg/cloudnative-pg:1.22.2"
)
// BuildInfo is a struct containing all the info about the build
@@ -36,7 +36,7 @@ type BuildInfo struct {
var (
// buildVersion injected during the build
- buildVersion = "1.22.1"
+ buildVersion = "1.22.2"
// buildCommit injected during the build
buildCommit = "none"
diff --git a/releases/cnpg-1.22.2.yaml b/releases/cnpg-1.22.2.yaml
new file mode 100644
index 0000000000..8837165623
--- /dev/null
+++ b/releases/cnpg-1.22.2.yaml
@@ -0,0 +1,14799 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-system
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: backups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Backup
+ listKind: BackupList
+ plural: backups
+ singular: backup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.method
+ name: Method
+ type: string
+ - jsonPath: .status.phase
+ name: Phase
+ type: string
+ - jsonPath: .status.error
+ name: Error
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Backup is the Schema for the backups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the backup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`
+ and `volumeSnapshot`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the backup. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without providing
+ explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ backupId:
+ description: The ID of the Barman backup
+ type: string
+ backupLabelFile:
+ description: Backup label file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ backupName:
+ description: The Name of the Barman backup
+ type: string
+ beginLSN:
+ description: The starting xlog
+ type: string
+ beginWal:
+ description: The starting WAL
+ type: string
+ commandError:
+ description: The backup command output in case of error
+ type: string
+ commandOutput:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data. This may not be populated in case of errors.
+ type: string
+ encryption:
+ description: Encryption method required to S3 API
+ type: string
+ endLSN:
+ description: The ending xlog
+ type: string
+ endWal:
+ description: The ending WAL
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ error:
+ description: The detected error
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google Cloud
+ Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage JSON
+ file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ default to false.
+ type: boolean
+ type: object
+ instanceID:
+ description: Information to identify the instance where the backup
+ has been taken from
+ properties:
+ ContainerID:
+ description: The container ID
+ type: string
+ podName:
+ description: The pod name
+ type: string
+ type: object
+ method:
+ description: The backup method being used
+ type: string
+ online:
+ description: Whether the backup was online/hot (`true`) or offline/cold
+ (`false`)
+ type: boolean
+ phase:
+ description: The last backup status
+ type: string
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without providing
+ explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the region
+ name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The references to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ snapshotBackupStatus:
+ description: Status of the volumeSnapshot backup
+ properties:
+ elements:
+ description: The elements list, populated with the gathered volume
+ snapshots
+ items:
+ description: BackupSnapshotElementStatus is a volume snapshot
+ that is part of a volume snapshot method backup
+ properties:
+ name:
+ description: Name is the snapshot resource name
+ type: string
+ tablespaceName:
+ description: |-
+ TablespaceName is the name of the snapshotted tablespace. Only set
+ when type is PG_TABLESPACE
+ type: string
+ type:
+ description: Type is the role of the snapshot in the cluster,
+ such as PG_DATA, PG_WAL and PG_TABLESPACE
+ type: string
+ required:
+ - name
+ - type
+ type: object
+ type: array
+ type: object
+ startedAt:
+ description: When the backup was started
+ format: date-time
+ type: string
+ stoppedAt:
+ description: When the backup was terminated
+ format: date-time
+ type: string
+ tablespaceMapFile:
+ description: Tablespace map file content as returned by Postgres in
+ case of online (hot) backups
+ format: byte
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: clusters.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Cluster
+ listKind: ClusterList
+ plural: clusters
+ singular: cluster
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - description: Number of instances
+ jsonPath: .status.instances
+ name: Instances
+ type: integer
+ - description: Number of ready instances
+ jsonPath: .status.readyInstances
+ name: Ready
+ type: integer
+ - description: Cluster current status
+ jsonPath: .status.phase
+ name: Status
+ type: string
+ - description: Primary pod
+ jsonPath: .status.currentPrimary
+ name: Primary
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Cluster is the Schema for the PostgreSQL API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the cluster.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ affinity:
+ description: Affinity/Anti-affinity rules for Pods
+ properties:
+ additionalPodAffinity:
+ description: AdditionalPodAffinity allows to specify pod affinity
+ terms to be passed to all the cluster's pods.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ additionalPodAntiAffinity:
+ description: |-
+ AdditionalPodAntiAffinity allows to specify pod anti-affinity terms to be added to the ones generated
+ by the operator if EnablePodAntiAffinity is set to true (default) or to be used exclusively if set to false.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term, associated
+ with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are
+ ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that
+ the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+ the label with key matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ enablePodAntiAffinity:
+ description: |-
+ Activates anti-affinity for the pods. The operator will define pods
+ anti-affinity unless this field is explicitly set to false
+ type: boolean
+ nodeAffinity:
+ description: |-
+ NodeAffinity describes node affinity scheduling rules for the pod.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated with the
+ corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching the corresponding
+ nodeSelectorTerm, in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector terms.
+ The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a map of key-value pairs used to define the nodes on which
+ the pods can run.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ podAntiAffinityType:
+ description: |-
+ PodAntiAffinityType allows the user to decide whether pod anti-affinity between cluster instance has to be
+ considered a strong requirement during scheduling or not. Allowed values are: "preferred" (default if empty) or
+ "required". Setting it to "required", could lead to instances remaining pending until new kubernetes nodes are
+ added if all the existing nodes don't match the required pod anti-affinity rule.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ type: string
+ tolerations:
+ description: |-
+ Tolerations is a list of Tolerations that should be set for all the pods, in order to allow them to run
+ on tainted nodes.
+ More info: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologyKey:
+ description: |-
+ TopologyKey to use for anti-affinity configuration. See k8s documentation
+ for more info on that
+ type: string
+ type: object
+ backup:
+ description: The configuration to be used for backups
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ providing explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+ When not defined, base backup files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud Storage
+ JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ default to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a map of key-value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing the
+ region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The reference to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a map of key-value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ type: object
+ required:
+ - destinationPath
+ type: object
+ retentionPolicy:
+ description: |-
+ RetentionPolicy is the retention policy to be used for backups
+ and WALs (i.e. '60d'). The retention policy is expressed in the form
+ of `XXu` where `XX` is a positive integer and `u` is in `[dwm]` -
+ days, weeks, months.
+ It's currently only applicable when using the BarmanObjectStore method.
+ pattern: ^[1-9][0-9]*[dwm]$
+ type: string
+ target:
+ default: prefer-standby
+ description: |-
+ The policy to decide which instance should perform backups. Available
+ options are empty string, which will default to `prefer-standby` policy,
+ `primary` to have backups run always on primary instances, `prefer-standby`
+ to have backups run preferably on the most updated standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ volumeSnapshot:
+ description: VolumeSnapshot provides the configuration for the
+ execution of volume snapshot backups.
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: Annotations key-value pairs that will be added
+ to .metadata.annotations snapshot resources.
+ type: object
+ className:
+ description: |-
+ ClassName specifies the Snapshot Class to be used for PG_DATA PersistentVolumeClaim.
+ It is the default class for the other types if no specific class is present
+ type: string
+ labels:
+ additionalProperties:
+ type: string
+ description: Labels are key-value pairs that will be added
+ to .metadata.labels snapshot resources.
+ type: object
+ online:
+ default: true
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ type: boolean
+ onlineConfiguration:
+ default:
+ immediateCheckpoint: false
+ waitForArchive: true
+ description: Configuration parameters to control the online/hot
+ backup with volume snapshots
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ snapshotOwnerReference:
+ default: none
+ description: SnapshotOwnerReference indicates the type of
+ owner reference the snapshot should have
+ enum:
+ - none
+ - cluster
+ - backup
+ type: string
+ tablespaceClassName:
+ additionalProperties:
+ type: string
+ description: |-
+ TablespaceClassName specifies the Snapshot Class to be used for the tablespaces.
+ Defaults to the PGDATA Snapshot Class, if set
+ type: object
+ walClassName:
+ description: WalClassName specifies the Snapshot Class to
+ be used for the PG_WAL PersistentVolumeClaim.
+ type: string
+ type: object
+ type: object
+ bootstrap:
+ description: Instructions to bootstrap this cluster
+ properties:
+ initdb:
+ description: Bootstrap the cluster via initdb
+ properties:
+ dataChecksums:
+ description: |-
+ Whether the `-k` option should be passed to initdb,
+ enabling checksums on data pages (default: `false`)
+ type: boolean
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ encoding:
+ description: The value to be passed as option `--encoding`
+ for initdb (default:`UTF8`)
+ type: string
+ import:
+ description: |-
+ Bootstraps the new cluster by importing data from an existing PostgreSQL
+ instance using logical backup (`pg_dump` and `pg_restore`)
+ properties:
+ databases:
+ description: The databases to import
+ items:
+ type: string
+ type: array
+ postImportApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is imported - to be used with extreme care
+ (by default empty). Only available in microservice type.
+ items:
+ type: string
+ type: array
+ roles:
+ description: The roles to import
+ items:
+ type: string
+ type: array
+ schemaOnly:
+ description: |-
+ When set to true, only the `pre-data` and `post-data` sections of
+ `pg_restore` are invoked, avoiding data import. Default: `false`.
+ type: boolean
+ source:
+ description: The source of the import
+ properties:
+ externalCluster:
+ description: The name of the externalCluster used
+ for import
+ type: string
+ required:
+ - externalCluster
+ type: object
+ type:
+ description: The import type. Can be `microservice` or
+ `monolith`.
+ enum:
+ - microservice
+ - monolith
+ type: string
+ required:
+ - databases
+ - source
+ - type
+ type: object
+ localeCType:
+ description: The value to be passed as option `--lc-ctype`
+ for initdb (default:`C`)
+ type: string
+ localeCollate:
+ description: The value to be passed as option `--lc-collate`
+ for initdb (default:`C`)
+ type: string
+ options:
+ description: |-
+ The list of options that must be passed to initdb when creating the cluster.
+ Deprecated: This could lead to inconsistent configurations,
+ please use the explicit provided parameters instead.
+ If defined, explicit values will be ignored.
+ items:
+ type: string
+ type: array
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ postInitApplicationSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the application
+ database right after it is created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitApplicationSQLRefs:
+ description: |-
+ PostInitApplicationSQLRefs points references to ConfigMaps or Secrets which
+ contain SQL files, the general implementation order to these references is
+ from all Secrets to all ConfigMaps, and inside Secrets or ConfigMaps,
+ the implementation order is the same as the order of each array
+ (by default empty)
+ properties:
+ configMapRefs:
+ description: ConfigMapRefs holds a list of references
+ to ConfigMaps
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ secretRefs:
+ description: SecretRefs holds a list of references to
+ Secrets
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ type: object
+ postInitSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser immediately
+ after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ postInitTemplateSQL:
+ description: |-
+ List of SQL queries to be executed as a superuser in the `template1`
+ after the cluster has been created - to be used with extreme care
+ (by default empty)
+ items:
+ type: string
+ type: array
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ walSegmentSize:
+ description: |-
+ The value in megabytes (1 to 1024) to be passed to the `--wal-segsize`
+ option for initdb (default: empty, resulting in PostgreSQL default: 16MB)
+ maximum: 1024
+ minimum: 1
+ type: integer
+ type: object
+ pg_basebackup:
+ description: |-
+ Bootstrap the cluster taking a physical backup of another compatible
+ PostgreSQL instance
+ properties:
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: The name of the server of which we need to take
+ a physical backup
+ minLength: 1
+ type: string
+ required:
+ - source
+ type: object
+ recovery:
+ description: Bootstrap the cluster from a backup
+ properties:
+ backup:
+ description: |-
+ The backup object containing the physical base backup from which to
+ initiate the recovery procedure.
+ Mutually exclusive with `source` and `volumeSnapshots`.
+ properties:
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive.
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ database:
+ description: 'Name of the database used by the application.
+ Default: `app`.'
+ type: string
+ owner:
+ description: |-
+ Name of the owner of the database in the instance to be used
+ by applications. Defaults to the value of the `database` key.
+ type: string
+ recoveryTarget:
+ description: |-
+ By default, the recovery process applies all the available
+ WAL files in the archive (full recovery). However, you can also
+ end the recovery as soon as a consistent state is reached or
+ recover to a point-in-time (PITR) by specifying a `RecoveryTarget` object,
+ as expected by PostgreSQL (i.e., timestamp, transaction Id, LSN, ...).
+ More info: https://www.postgresql.org/docs/current/runtime-config-wal.html#RUNTIME-CONFIG-WAL-RECOVERY-TARGET
+ properties:
+ backupID:
+ description: |-
+ The ID of the backup from which to start the recovery process.
+ If empty (default) the operator will automatically detect the backup
+ based on targetTime or targetLSN if specified. Otherwise use the
+ latest available backup in chronological order.
+ type: string
+ exclusive:
+ description: |-
+ Set the target to be exclusive. If omitted, defaults to false, so that
+ in Postgres, `recovery_target_inclusive` will be true
+ type: boolean
+ targetImmediate:
+ description: End recovery as soon as a consistent state
+ is reached
+ type: boolean
+ targetLSN:
+ description: The target LSN (Log Sequence Number)
+ type: string
+ targetName:
+ description: |-
+ The target name (to be previously created
+ with `pg_create_restore_point`)
+ type: string
+ targetTLI:
+ description: The target timeline ("latest" or a positive
+ integer)
+ type: string
+ targetTime:
+ description: The target time as a timestamp in the RFC3339
+ standard
+ type: string
+ targetXID:
+ description: The target transaction ID
+ type: string
+ type: object
+ secret:
+ description: |-
+ Name of the secret containing the initial credentials for the
+ owner of the user database. If empty a new secret will be
+ created from scratch
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ source:
+ description: |-
+ The external cluster whose backup we will restore. This is also
+ used as the name of the folder under which the backup is stored,
+ so it must be set to the name of the source cluster
+ Mutually exclusive with `backup`.
+ type: string
+ volumeSnapshots:
+ description: |-
+ The static PVC data source(s) from which to initiate the
+ recovery procedure. Currently supporting `VolumeSnapshot`
+ and `PersistentVolumeClaim` resources that map an existing
+ PVC group, compatible with CloudNativePG, and taken with
+ a cold backup copy on a fenced Postgres instance (limitation
+ which will be removed in the future when online backup
+ will be implemented).
+ Mutually exclusive with `backup`.
+ properties:
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ tablespaceStorage:
+ additionalProperties:
+ description: |-
+ TypedLocalObjectReference contains enough information to let you locate the
+ typed referenced object inside the same namespace.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ description: Configuration of the storage for PostgreSQL
+ tablespaces
+ type: object
+ walStorage:
+ description: Configuration of the storage for PostgreSQL
+ WAL (Write-Ahead Log)
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - storage
+ type: object
+ type: object
+ type: object
+ certificates:
+ description: The configuration for the CA and related certificates
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ description:
+ description: Description of this PostgreSQL cluster
+ type: string
+ enableSuperuserAccess:
+ default: false
+ description: |-
+ When this option is enabled, the operator will use the `SuperuserSecret`
+ to update the `postgres` user password (if the secret is
+ not present, the operator will automatically create one). When this
+ option is disabled, the operator will ignore the `SuperuserSecret` content, delete
+ it when automatically created, and then blank the password of the `postgres`
+ user by setting it to `NULL`. Disabled by default.
+ type: boolean
+ env:
+ description: |-
+ Env follows the Env format to pass environment variables
+ to the pods created in the cluster
+ items:
+ description: EnvVar represents an environment variable present in
+ a Container.
+ properties:
+ name:
+ description: Name of the environment variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's value. Cannot
+ be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath is
+ written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for volumes,
+ optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of the exposed
+ resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ EnvFrom follows the EnvFrom format to pass environment variables
+ sources to the pods to be used by Env
+ items:
+ description: EnvFromSource represents the source of a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend to each key in
+ the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ ephemeralVolumeSource:
+ description: EphemeralVolumeSource allows the user to configure the
+ source of ephemeral volumes.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to
+ consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the
+ PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ ephemeralVolumesSizeLimit:
+ description: |-
+ EphemeralVolumesSizeLimit allows the user to set the limits for the ephemeral
+ volumes
+ properties:
+ shm:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Shm is the size limit of the shared memory volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ temporaryData:
+ anyOf:
+ - type: integer
+ - type: string
+ description: TemporaryData is the size limit of the temporary
+ data volume
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ externalClusters:
+ description: The list of external clusters which are used in the configuration
+ items:
+ description: |-
+ ExternalCluster represents the connection parameters to an
+ external cluster which is used in the other sections of the configuration
+ properties:
+ barmanObjectStore:
+ description: The configuration for the barman-cloud tool suite
+ properties:
+ azureCredentials:
+ description: The credentials to use to upload data to Azure
+ Blob Storage
+ properties:
+ connectionString:
+ description: The connection string to be used
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromAzureAD:
+ description: Use the Azure AD based authentication without
+ providing explicitly the keys.
+ type: boolean
+ storageAccount:
+ description: The storage account where to upload data
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageKey:
+ description: |-
+ The storage account key to be used in conjunction
+ with the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ storageSasToken:
+ description: |-
+ A shared-access-signature to be used in conjunction with
+ the storage account name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ data:
+ description: |-
+ The configuration to be used to backup the data files
+ When not defined, base backups files will be stored uncompressed and may
+ be unencrypted in the object store, according to the bucket default
+ policy.
+ properties:
+ additionalCommandArgs:
+ description: |-
+ AdditionalCommandArgs represents additional arguments that can be appended
+ to the 'barman-cloud-backup' command-line invocation. These arguments
+ provide flexibility to customize the backup process further according to
+ specific requirements or configurations.
+
+
+ Example:
+ In a scenario where specialized backup options are required, such as setting
+ a specific timeout or defining custom behavior, users can use this field
+ to specify additional command arguments.
+
+
+ Note:
+ It's essential to ensure that the provided arguments are valid and supported
+ by the 'barman-cloud-backup' command, to avoid potential errors or unintended
+ behavior during execution.
+ items:
+ type: string
+ type: array
+ compression:
+ description: |-
+ Compress a backup file (a tar file per tablespace) while streaming it
+ to the object store. Available options are empty string (no
+ compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whether to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ jobs:
+ description: |-
+ The number of parallel jobs to be used to upload the backup, defaults
+ to 2
+ format: int32
+ minimum: 1
+ type: integer
+ type: object
+ destinationPath:
+ description: |-
+ The path where to store the backup (i.e. s3://bucket/path/to/folder)
+ this path, with different destination folders, will be used for WALs
+ and for data
+ minLength: 1
+ type: string
+ endpointCA:
+ description: |-
+ EndpointCA store the CA bundle of the barman endpoint.
+ Useful when using self-signed certificates to avoid
+ errors with certificate issuer and barman-cloud-wal-archive
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ endpointURL:
+ description: |-
+ Endpoint to be used to upload data to the cloud,
+ overriding the automatic endpoint discovery
+ type: string
+ googleCredentials:
+ description: The credentials to use to upload data to Google
+ Cloud Storage
+ properties:
+ applicationCredentials:
+ description: The secret containing the Google Cloud
+ Storage JSON file with the credentials
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ gkeEnvironment:
+ description: |-
+ If set to true, will presume that it's running inside a GKE environment,
+ default to false.
+ type: boolean
+ type: object
+ historyTags:
+ additionalProperties:
+ type: string
+ description: |-
+ HistoryTags is a list of key value pairs that will be passed to the
+ Barman --history-tags option.
+ type: object
+ s3Credentials:
+ description: The credentials to use to upload data to S3
+ properties:
+ accessKeyId:
+ description: The reference to the access key id
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ inheritFromIAMRole:
+ description: Use the role based authentication without
+ providing explicitly the keys.
+ type: boolean
+ region:
+ description: The reference to the secret containing
+ the region name
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ secretAccessKey:
+ description: The reference to the secret access key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ sessionToken:
+ description: The references to the session key
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: object
+ serverName:
+ description: |-
+ The server name on S3, the cluster name is used if this
+ parameter is omitted
+ type: string
+ tags:
+ additionalProperties:
+ type: string
+ description: |-
+ Tags is a list of key value pairs that will be passed to the
+ Barman --tags option.
+ type: object
+ wal:
+ description: |-
+ The configuration for the backup of the WAL stream.
+ When not defined, WAL files will be stored uncompressed and may be
+ unencrypted in the object store, according to the bucket default policy.
+ properties:
+ compression:
+ description: |-
+ Compress a WAL file before sending it to the object store. Available
+ options are empty string (no compression, default), `gzip`, `bzip2` or `snappy`.
+ enum:
+ - gzip
+ - bzip2
+ - snappy
+ type: string
+ encryption:
+ description: |-
+ Whenever to force the encryption of files (if the bucket is
+ not already configured for that).
+ Allowed options are empty string (use the bucket policy, default),
+ `AES256` and `aws:kms`
+ enum:
+ - AES256
+ - aws:kms
+ type: string
+ maxParallel:
+ description: |-
+ Number of WAL files to be either archived in parallel (when the
+ PostgreSQL instance is archiving to a backup object store) or
+ restored in parallel (when a PostgreSQL standby is fetching WAL
+ files from a recovery object store). If not specified, WAL files
+ will be processed one at a time. It accepts a positive integer as a
+ value - with 1 being the minimum accepted value.
+ minimum: 1
+ type: integer
+ type: object
+ required:
+ - destinationPath
+ type: object
+ connectionParameters:
+ additionalProperties:
+ type: string
+ description: The list of connection parameters, such as dbname,
+ host, username, etc
+ type: object
+ name:
+ description: The server name, required
+ type: string
+ password:
+ description: |-
+ The reference to the password to be used to connect to the server.
+ If a password is provided, CloudNativePG creates a PostgreSQL
+ passfile at `/controller/external/NAME/pass` (where "NAME" is the
+ cluster's name). This passfile is automatically referenced in the
+ connection string when establishing a connection to the remote
+ PostgreSQL server from the current PostgreSQL `Cluster`. This ensures
+ secure and efficient password management for external clusters.
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslCert:
+ description: |-
+ The reference to an SSL certificate to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslKey:
+ description: |-
+ The reference to an SSL private key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ sslRootCert:
+ description: |-
+ The reference to an SSL CA public key to be used to connect to this
+ instance
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must
+ be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - name
+ type: object
+ type: array
+ failoverDelay:
+ default: 0
+ description: |-
+ The amount of time (in seconds) to wait before triggering a failover
+ after the primary PostgreSQL instance in the cluster was detected
+ to be unhealthy
+ format: int32
+ type: integer
+ imageName:
+ description: |-
+ Name of the container image, supporting both tags (`<image>:<tag>`)
+ and digests for deterministic and repeatable deployments
+ (`<image>:<tag>@sha256:<digestValue>`)
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of `Always`, `Never` or `IfNotPresent`.
+ If not defined, it defaults to `IfNotPresent`.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ imagePullSecrets:
+ description: The list of pull secrets to be used to pull the images
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate a
+ local object with a known type inside the same namespace
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ inheritedMetadata:
+ description: Metadata that will be inherited by all objects related
+ to the Cluster
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ type: object
+ type: object
+ instances:
+ default: 1
+ description: Number of instances required in the cluster
+ minimum: 1
+ type: integer
+ logLevel:
+ default: info
+ description: 'The instances'' log level, one of the following values:
+ error, warning, info (default), debug, trace'
+ enum:
+ - error
+ - warning
+ - info
+ - debug
+ - trace
+ type: string
+ managed:
+ description: The configuration that is used by the portions of PostgreSQL
+ that are managed by the instance manager
+ properties:
+ roles:
+ description: Database roles managed by the `Cluster`
+ items:
+ description: |-
+ RoleConfiguration is the representation, in Kubernetes, of a PostgreSQL role
+ with the additional field Ensure specifying whether to ensure the presence or
+ absence of the role in the database
+
+
+ The defaults of the CREATE ROLE command are applied
+ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
+ properties:
+ bypassrls:
+ description: |-
+ Whether a role bypasses every row-level security (RLS) policy.
+ Default is `false`.
+ type: boolean
+ comment:
+ description: Description of the role
+ type: string
+ connectionLimit:
+ default: -1
+ description: |-
+ If the role can log in, this specifies how many concurrent
+ connections the role can make. `-1` (the default) means no limit.
+ format: int64
+ type: integer
+ createdb:
+ description: |-
+ When set to `true`, the role being defined will be allowed to create
+ new databases. Specifying `false` (default) will deny a role the
+ ability to create databases.
+ type: boolean
+ createrole:
+ description: |-
+ Whether the role will be permitted to create, alter, drop, comment
+ on, change the security label for, and grant or revoke membership in
+ other roles. Default is `false`.
+ type: boolean
+ disablePassword:
+ description: DisablePassword indicates that a role's password
+ should be set to NULL in Postgres
+ type: boolean
+ ensure:
+ default: present
+ description: Ensure the role is `present` or `absent` -
+ defaults to "present"
+ enum:
+ - present
+ - absent
+ type: string
+ inRoles:
+ description: |-
+ List of one or more existing roles to which this role will be
+ immediately added as a new member. Default empty.
+ items:
+ type: string
+ type: array
+ inherit:
+ default: true
+ description: |-
+ Whether a role "inherits" the privileges of roles it is a member of.
+ Default is `true`.
+ type: boolean
+ login:
+ description: |-
+ Whether the role is allowed to log in. A role having the `login`
+ attribute can be thought of as a user. Roles without this attribute
+ are useful for managing database privileges, but are not users in
+ the usual sense of the word. Default is `false`.
+ type: boolean
+ name:
+ description: Name of the role
+ type: string
+ passwordSecret:
+ description: |-
+ Secret containing the password of the role (if present)
+ If null, the password will be ignored unless DisablePassword is set
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ replication:
+ description: |-
+ Whether a role is a replication role. A role must have this
+ attribute (or be a superuser) in order to be able to connect to the
+ server in replication mode (physical or logical replication) and in
+ order to be able to create or drop replication slots. A role having
+ the `replication` attribute is a very highly privileged role, and
+ should only be used on roles actually used for replication. Default
+ is `false`.
+ type: boolean
+ superuser:
+ description: |-
+ Whether the role is a `superuser` who can override all access
+ restrictions within the database - superuser status is dangerous and
+ should be used only when really needed. You must yourself be a
+ superuser to create a new superuser. Default is `false`.
+ type: boolean
+ validUntil:
+ description: |-
+ Date and time after which the role's password is no longer valid.
+ When omitted, the password will never expire (default).
+ format: date-time
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ type: object
+ maxSyncReplicas:
+ default: 0
+ description: |-
+ The target value for the synchronous replication quorum, that can be
+ decreased if the number of ready standbys is lower than this.
+ Undefined or 0 disable synchronous replication.
+ minimum: 0
+ type: integer
+ minSyncReplicas:
+ default: 0
+ description: |-
+ Minimum number of instances required in synchronous replication with the
+ primary. Undefined or 0 allow writes to complete when no standby is
+ available.
+ minimum: 0
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this cluster
+ properties:
+ customQueriesConfigMap:
+ description: The list of config maps containing the custom queries
+ items:
+ description: |-
+ ConfigMapKeySelector contains enough information to let you locate
+ the key of a ConfigMap
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ customQueriesSecret:
+ description: The list of secrets containing the custom queries
+ items:
+ description: |-
+ SecretKeySelector contains enough information to let you locate
+ the key of a Secret
+ properties:
+ key:
+ description: The key to select
+ type: string
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - key
+ - name
+ type: object
+ type: array
+ disableDefaultQueries:
+ default: false
+ description: |-
+ Whether the default queries should be injected.
+ Set it to `true` if you don't want to inject default queries into the cluster.
+ Default: false.
+ type: boolean
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
+ nodeMaintenanceWindow:
+ description: Define a maintenance window for the Kubernetes nodes
+ properties:
+ inProgress:
+ default: false
+ description: Is there a node maintenance activity in progress?
+ type: boolean
+ reusePVC:
+ default: true
+ description: |-
+ Reuse the existing PVC (wait for the node to come
+ up again) or not (recreate it elsewhere - when `instances` >1)
+ type: boolean
+ type: object
+ postgresGID:
+ default: 26
+ description: The GID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresUID:
+ default: 26
+ description: The UID of the `postgres` user inside the image, defaults
+ to `26`
+ format: int64
+ type: integer
+ postgresql:
+ description: Configuration of the PostgreSQL server
+ properties:
+ enableAlterSystem:
+ description: |-
+ If this parameter is true, the user will be able to invoke `ALTER SYSTEM`
+ on this CloudNativePG Cluster.
+ This should only be used for debugging and troubleshooting.
+ Defaults to false.
+ type: boolean
+ ldap:
+ description: Options to specify LDAP configuration
+ properties:
+ bindAsAuth:
+ description: Bind as authentication configuration
+ properties:
+ prefix:
+ description: Prefix for the bind authentication option
+ type: string
+ suffix:
+ description: Suffix for the bind authentication option
+ type: string
+ type: object
+ bindSearchAuth:
+ description: Bind+Search authentication configuration
+ properties:
+ baseDN:
+ description: Root DN to begin the user search
+ type: string
+ bindDN:
+ description: DN of the user to bind to the directory
+ type: string
+ bindPassword:
+ description: Secret with the password for the user to
+ bind to the directory
+ properties:
+ key:
+ description: The key of the secret to select from. Must
+ be a valid secret key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret or its key
+ must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ searchAttribute:
+ description: Attribute to match against the username
+ type: string
+ searchFilter:
+ description: Search filter to use when doing the search+bind
+ authentication
+ type: string
+ type: object
+ port:
+ description: LDAP server port
+ type: integer
+ scheme:
+ description: LDAP schema to be used, possible options are
+ `ldap` and `ldaps`
+ enum:
+ - ldap
+ - ldaps
+ type: string
+ server:
+ description: LDAP hostname or IP address
+ type: string
+ tls:
+ description: Set to 'true' to enable LDAP over TLS. 'false'
+ is default
+ type: boolean
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: PostgreSQL configuration options (postgresql.conf)
+ type: object
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ pg_ident:
+ description: |-
+ PostgreSQL User Name Maps rules (lines to be appended
+ to the pg_ident.conf file)
+ items:
+ type: string
+ type: array
+ promotionTimeout:
+ description: |-
+ Specifies the maximum number of seconds to wait when promoting an instance to primary.
+ Default value is 40000000, greater than one year in seconds,
+ big enough to simulate an infinite timeout
+ format: int32
+ type: integer
+ shared_preload_libraries:
+ description: Lists of shared preload libraries to add to the default
+ ones
+ items:
+ type: string
+ type: array
+ syncReplicaElectionConstraint:
+ description: |-
+ Requirements to be met by sync replicas. This will affect how the "synchronous_standby_names" parameter will be
+ set up.
+ properties:
+ enabled:
+ description: This flag enables the constraints for sync replicas
+ type: boolean
+ nodeLabelsAntiAffinity:
+ description: A list of node labels values to extract and compare
+ to evaluate if the pods reside in the same topology or not
+ items:
+ type: string
+ type: array
+ required:
+ - enabled
+ type: object
+ type: object
+ primaryUpdateMethod:
+ default: restart
+ description: |-
+ Method to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be with a switchover (`switchover`) or in-place (`restart` - default)
+ enum:
+ - switchover
+ - restart
+ type: string
+ primaryUpdateStrategy:
+ default: unsupervised
+ description: |-
+ Deployment strategy to follow to upgrade the primary server during a rolling
+ update procedure, after all replicas have been successfully updated:
+ it can be automated (`unsupervised` - default) or manual (`supervised`)
+ enum:
+ - unsupervised
+ - supervised
+ type: string
+ priorityClassName:
+ description: |-
+ Name of the priority class which will be used in every generated Pod, if the PriorityClass
+ specified does not exist, the pod will not be able to schedule. Please refer to
+ https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
+ for more information
+ type: string
+ projectedVolumeTemplate:
+ description: |-
+ Template to be used to define projected volumes, projected volumes will be mounted
+ under `/projected` base folder
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description: Projection that may be projected along with other
+ supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume root to write
+ the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the configMap data
+ to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about the downwardAPI
+ data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume file
+ items:
+ description: DownwardAPIVolumeFile represents information
+ to create the file containing the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of the
+ pod: only annotations, labels, name and namespace
+ are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the FieldPath
+ is written in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select in
+ the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must not
+ be absolute or contain the ''..'' path. Must
+ be utf-8 encoded. The first item of the relative
+ path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required for
+ volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format of
+ the exposed resources, defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ description: secret information about the secret data to
+ project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within a
+ volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional field specify whether the Secret
+ or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information about the
+ serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours.Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ replica:
+ description: Replica cluster configuration
+ properties:
+ enabled:
+ description: |-
+ If replica mode is enabled, this cluster will be a replica of an
+ existing cluster. Replica cluster can be created from a recovery
+ object store or via streaming through pg_basebackup.
+ Refer to the Replica clusters page of the documentation for more information.
+ type: boolean
+ source:
+ description: The name of the external cluster which is the replication
+ origin
+ minLength: 1
+ type: string
+ required:
+ - enabled
+ - source
+ type: object
+ replicationSlots:
+ default:
+ highAvailability:
+ enabled: true
+ description: Replication slots management configuration
+ properties:
+ highAvailability:
+ default:
+ enabled: true
+ description: Replication slots for high availability configuration
+ properties:
+ enabled:
+ default: true
+ description: |-
+ If enabled (default), the operator will automatically manage replication slots
+ on the primary instance and use them in streaming replication
+ connections with all the standby instances that are part of the HA
+ cluster. If disabled, the operator will not take advantage
+ of replication slots in streaming connections with the replicas.
+ This feature also controls replication slots in replica cluster,
+ from the designated primary to its cascading replicas.
+ type: boolean
+ slotPrefix:
+ default: _cnpg_
+ description: |-
+ Prefix for replication slots managed by the operator for HA.
+ It may only contain lower case letters, numbers, and the underscore character.
+ This can only be set at creation time. By default set to `_cnpg_`.
+ pattern: ^[0-9a-z_]*$
+ type: string
+ type: object
+ updateInterval:
+ default: 30
+ description: |-
+ Standby will update the status of the local replication slots
+ every `updateInterval` seconds (default 30).
+ minimum: 1
+ type: integer
+ type: object
+ resources:
+ description: |-
+ Resources requirements of every generated Pod. Please refer to
+ https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ for more information.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified Kubernetes
+ scheduler. If not specified, the pod will be dispatched by the default
+ scheduler. More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/
+ type: string
+ seccompProfile:
+ description: |-
+ The SeccompProfile applied to every Pod and Container.
+ Defaults to: `RuntimeDefault`
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ serviceAccountTemplate:
+ description: Configure the generation of the service account
+ properties:
+ metadata:
+ description: |-
+ Metadata are the metadata to be used for the generated
+ service account
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ required:
+ - metadata
+ type: object
+ smartShutdownTimeout:
+ default: 180
+ description: |-
+ The time in seconds that controls the window of time reserved for the smart shutdown of Postgres to complete.
+ Make sure you reserve enough time for the operator to request a fast shutdown of Postgres
+ (that is: `stopDelay` - `smartShutdownTimeout`).
+ format: int32
+ type: integer
+ startDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ successfully start up (default 3600).
+ The startup probe failure threshold is derived from this value using the formula:
+ ceiling(startDelay / 10).
+ format: int32
+ type: integer
+ stopDelay:
+ default: 1800
+ description: |-
+ The time in seconds that is allowed for a PostgreSQL instance to
+ gracefully shutdown (default 1800)
+ format: int32
+ type: integer
+ storage:
+ description: Configuration of the storage of the instances
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                  set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+            description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ superuserSecret:
+ description: |-
+ The secret containing the superuser password. If not defined a new
+ secret will be created with a randomly generated password
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ switchoverDelay:
+ default: 3600
+ description: |-
+ The time in seconds that is allowed for a primary PostgreSQL instance
+ to gracefully shutdown during a switchover.
+ Default value is 3600 seconds (1 hour).
+ format: int32
+ type: integer
+ tablespaces:
+ description: The tablespaces configuration
+ items:
+ description: |-
+ TablespaceConfiguration is the configuration of a tablespace, and includes
+ the storage specification for the tablespace
+ properties:
+ name:
+ description: The name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ properties:
+ name:
+ type: string
+ type: object
+ storage:
+ description: The storage configuration for the tablespace
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent
+ Volume Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being
+ referenced
+ type: string
+ name:
+ description: Name is the name of resource being
+ referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes
+ to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                              set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to
+ the PersistentVolume backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+                      description: Resize existing PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ temporary:
+ default: false
+ description: |-
+ When set to true, the tablespace will be added as a `temp_tablespaces`
+ entry in PostgreSQL, and will be available to automatically house temp
+ database objects, or other temporary files. Please refer to PostgreSQL
+ documentation for more information on the `temp_tablespaces` GUC.
+ type: boolean
+ required:
+ - name
+ - storage
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints specifies how to spread matching pods among the given topology.
+ More info:
+ https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
+ items:
+ description: TopologySpreadConstraint specifies how to spread matching
+ pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+
+
+ This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ walStorage:
+ description: Configuration of the storage for PostgreSQL WAL (Write-Ahead
+ Log)
+ properties:
+ pvcTemplate:
+ description: Template to be used to generate the Persistent Volume
+ Claim
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource being referenced
+ type: string
+ name:
+ description: Name is the name of resource being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over volumes to consider
+ for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector
+ requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the selector
+ applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+              set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference to the PersistentVolume
+ backing this claim.
+ type: string
+ type: object
+ resizeInUseVolumes:
+ default: true
+ description: Resize existent PVCs, defaults to true
+ type: boolean
+ size:
+ description: |-
+ Size of the storage. Required if not already specified in the PVC template.
+ Changes to this field are automatically reapplied to the created PVCs.
+ Size cannot be decreased.
+ type: string
+ storageClass:
+ description: |-
+ StorageClass to use for PVCs. Applied after
+ evaluating the PVC template, if available.
+ If not specified, the generated PVCs will use the
+ default storage class
+ type: string
+ type: object
+ required:
+ - instances
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the cluster. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ availableArchitectures:
+ description: AvailableArchitectures reports the available architectures
+ of a cluster
+ items:
+ description: AvailableArchitecture represents the state of a cluster's
+ architecture
+ properties:
+ goArch:
+ description: GoArch is the name of the executable architecture
+ type: string
+ hash:
+ description: Hash is the hash of the executable
+ type: string
+ required:
+ - goArch
+ - hash
+ type: object
+ type: array
+ azurePVCUpdateEnabled:
+ description: AzurePVCUpdateEnabled shows if the PVC online upgrade
+ is enabled for this cluster
+ type: boolean
+ certificates:
+ description: The configuration for the CA and related certificates,
+ initialized with defaults.
+ properties:
+ clientCASecret:
+ description: |-
+ The secret containing the Client CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate all the client certificates.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the client certificates,
+ used as `ssl_ca_file` of all the instances.
+ - `ca.key`: key used to generate client certificates, if ReplicationTLSSecret is provided,
+ this can be omitted.
+ type: string
+ expirations:
+ additionalProperties:
+ type: string
+ description: Expiration dates for all certificates.
+ type: object
+ replicationTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the client certificate to authenticate as
+ the `streaming_replica` user.
+ If not defined, ClientCASecret must provide also `ca.key`, and a new secret will be
+ created using the provided CA.
+ type: string
+ serverAltDNSNames:
+ description: The list of the server alternative DNS names to be
+ added to the generated server TLS certificates, when required.
+ items:
+ type: string
+ type: array
+ serverCASecret:
+ description: |-
+ The secret containing the Server CA certificate. If not defined, a new secret will be created
+ with a self-signed CA and will be used to generate the TLS certificate ServerTLSSecret.
+
+ Contains:
+
+ - `ca.crt`: CA that should be used to validate the server certificate,
+ used as `sslrootcert` in client connection strings.
+ - `ca.key`: key used to generate Server SSL certs, if ServerTLSSecret is provided,
+ this can be omitted.
+ type: string
+ serverTLSSecret:
+ description: |-
+ The secret of type kubernetes.io/tls containing the server TLS certificate and key that will be set as
+ `ssl_cert_file` and `ssl_key_file` so that clients can connect to postgres securely.
+ If not defined, ServerCASecret must provide also `ca.key` and a new secret will be
+ created using the provided CA.
+ type: string
+ type: object
+ cloudNativePGCommitHash:
+              description: The commit hash of the operator build that is running
+ type: string
+ cloudNativePGOperatorHash:
+ description: The hash of the binary of the operator
+ type: string
+ conditions:
+ description: Conditions for cluster object
+ items:
+ description: "Condition contains details for one aspect of the current
+ state of this API Resource.\n---\nThis struct is intended for
+ direct use as an array at the field path .status.conditions. For
+ example,\n\n\n\ttype FooStatus struct{\n\t // Represents the
+ observations of a foo's current state.\n\t // Known .status.conditions.type
+ are: \"Available\", \"Progressing\", and \"Degraded\"\n\t //
+ +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t
+ \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\"
+ patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t
+ \ // other fields\n\t}"
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: |-
+ type of condition in CamelCase or in foo.example.com/CamelCase.
+ ---
+ Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be
+ useful (see .node.status.conditions), the ability to deconflict is important.
+ The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ configMapResourceVersion:
+ description: |-
+ The list of resource versions of the configmaps,
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ configmap data
+ properties:
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the config maps used to pass metrics.
+ Map keys are the config map names, map values are the versions
+ type: object
+ type: object
+ currentPrimary:
+ description: Current primary instance
+ type: string
+ currentPrimaryFailingSinceTimestamp:
+ description: |-
+ The timestamp when the primary was detected to be unhealthy
+ This field is reported when `.spec.failoverDelay` is populated or during online upgrades
+ type: string
+ currentPrimaryTimestamp:
+ description: The timestamp when the last actual promotion to primary
+ has occurred
+ type: string
+ danglingPVC:
+ description: |-
+ List of all the PVCs created by this cluster and still available
+ which are not attached to a Pod
+ items:
+ type: string
+ type: array
+ firstRecoverabilityPoint:
+ description: |-
+ The first recoverability point, stored as a date in RFC3339 format.
+ This field is calculated from the content of FirstRecoverabilityPointByMethod
+ type: string
+ firstRecoverabilityPointByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: The first recoverability point, stored as a date in RFC3339
+ format, per backup method type
+ type: object
+ healthyPVC:
+ description: List of all the PVCs not dangling nor initializing
+ items:
+ type: string
+ type: array
+ initializingPVC:
+ description: List of all the PVCs that are being initialized by this
+ cluster
+ items:
+ type: string
+ type: array
+ instanceNames:
+ description: List of instance names in the cluster
+ items:
+ type: string
+ type: array
+ instances:
+ description: The total number of PVC Groups detected in the cluster.
+ It may differ from the number of existing instance pods.
+ type: integer
+ instancesReportedState:
+ additionalProperties:
+ description: InstanceReportedState describes the last reported state
+ of an instance during a reconciliation loop
+ properties:
+ isPrimary:
+ description: indicates if an instance is the primary one
+ type: boolean
+ timeLineID:
+ description: indicates on which TimelineId the instance is
+ type: integer
+ required:
+ - isPrimary
+ type: object
+ description: The reported state of the instances during the last reconciliation
+ loop
+ type: object
+ instancesStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: InstancesStatus indicates in which status the instances
+ are
+ type: object
+ jobCount:
+ description: How many Jobs have been created by this cluster
+ format: int32
+ type: integer
+ lastFailedBackup:
+ description: Stored as a date in RFC3339 format
+ type: string
+ lastSuccessfulBackup:
+ description: |-
+ Last successful backup, stored as a date in RFC3339 format
+ This field is calculated from the content of LastSuccessfulBackupByMethod
+ type: string
+ lastSuccessfulBackupByMethod:
+ additionalProperties:
+ format: date-time
+ type: string
+ description: Last successful backup, stored as a date in RFC3339 format,
+ per backup method type
+ type: object
+ latestGeneratedNode:
+ description: ID of the latest generated node (used to avoid node name
+ clashing)
+ type: integer
+ managedRolesStatus:
+ description: ManagedRolesStatus reports the state of the managed roles
+ in the cluster
+ properties:
+ byStatus:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: ByStatus gives the list of roles in each state
+ type: object
+ cannotReconcile:
+ additionalProperties:
+ items:
+ type: string
+ type: array
+ description: |-
+ CannotReconcile lists roles that cannot be reconciled in PostgreSQL,
+ with an explanation of the cause
+ type: object
+ passwordStatus:
+ additionalProperties:
+ description: PasswordState represents the state of the password
+ of a managed RoleConfiguration
+ properties:
+ resourceVersion:
+ description: the resource version of the password secret
+ type: string
+ transactionID:
+ description: the last transaction ID to affect the role
+ definition in PostgreSQL
+ format: int64
+ type: integer
+ type: object
+ description: PasswordStatus gives the last transaction id and
+ password secret version for each managed role
+ type: object
+ type: object
+ onlineUpdateEnabled:
+ description: OnlineUpdateEnabled shows if the online upgrade is enabled
+ inside the cluster
+ type: boolean
+ phase:
+ description: Current phase of the cluster
+ type: string
+ phaseReason:
+ description: Reason for the current phase
+ type: string
+ poolerIntegrations:
+ description: The integration needed by poolers referencing the cluster
+ properties:
+ pgBouncerIntegration:
+ description: PgBouncerIntegrationStatus encapsulates the needed
+ integration for the pgbouncer poolers referencing the cluster
+ properties:
+ secrets:
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ pvcCount:
+ description: How many PVCs have been created by this cluster
+ format: int32
+ type: integer
+ readService:
+ description: Current list of read pods
+ type: string
+ readyInstances:
+ description: The total number of ready instances in the cluster. It
+ is equal to the number of ready instance pods.
+ type: integer
+ resizingPVC:
+ description: List of all the PVCs that have ResizingPVC condition.
+ items:
+ type: string
+ type: array
+ secretsResourceVersion:
+ description: |-
+ The list of resource versions of the secrets
+ managed by the operator. Every change here is done in the
+ interest of the instance manager, which will refresh the
+ secret data
+ properties:
+ applicationSecretVersion:
+ description: The resource version of the "app" user secret
+ type: string
+ barmanEndpointCA:
+ description: The resource version of the Barman Endpoint CA if
+ provided
+ type: string
+ caSecretVersion:
+ description: Unused. Retained for compatibility with old versions.
+ type: string
+ clientCaSecretVersion:
+ description: The resource version of the PostgreSQL client-side
+ CA secret version
+ type: string
+ externalClusterSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the external cluster secrets
+ type: object
+ managedRoleSecretVersion:
+ additionalProperties:
+ type: string
+ description: The resource versions of the managed roles secrets
+ type: object
+ metrics:
+ additionalProperties:
+ type: string
+ description: |-
+ A map with the versions of all the secrets used to pass metrics.
+ Map keys are the secret names, map values are the versions
+ type: object
+ replicationSecretVersion:
+ description: The resource version of the "streaming_replica" user
+ secret
+ type: string
+ serverCaSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ CA secret version
+ type: string
+ serverSecretVersion:
+ description: The resource version of the PostgreSQL server-side
+ secret version
+ type: string
+ superuserSecretVersion:
+ description: The resource version of the "postgres" user secret
+ type: string
+ type: object
+ tablespacesStatus:
+ description: TablespacesStatus reports the state of the declarative
+ tablespaces in the cluster
+ items:
+ description: TablespaceState represents the state of a tablespace
+ in a cluster
+ properties:
+ error:
+ description: Error is the reconciliation error, if any
+ type: string
+ name:
+ description: Name is the name of the tablespace
+ type: string
+ owner:
+ description: Owner is the PostgreSQL user owning the tablespace
+ type: string
+ state:
+ description: State is the latest reconciliation state
+ type: string
+ required:
+ - name
+ - state
+ type: object
+ type: array
+ targetPrimary:
+ description: |-
+ Target primary instance, this is different from the previous one
+ during a switchover or a failover
+ type: string
+ targetPrimaryTimestamp:
+ description: The timestamp when the last request for a new primary
+ has occurred
+ type: string
+ timelineID:
+ description: The timeline of the Postgres cluster
+ type: integer
+ topology:
+ description: Instances topology.
+ properties:
+ instances:
+ additionalProperties:
+ additionalProperties:
+ type: string
+ description: PodTopologyLabels represent the topology of a Pod.
+ map[labelName]labelValue
+ type: object
+ description: Instances contains the pod topology of the instances
+ type: object
+ nodesUsed:
+ description: |-
+ NodesUsed represents the count of distinct nodes accommodating the instances.
+ A value of '1' suggests that all instances are hosted on a single node,
+ implying the absence of High Availability (HA). Ideally, this value should
+ be the same as the number of instances in the Postgres HA cluster, implying
+ shared nothing architecture on the compute side.
+ format: int32
+ type: integer
+ successfullyExtracted:
+ description: |-
+                  SuccessfullyExtracted indicates if the topology data was extracted. It is useful to enact fallback behaviors
+ in synchronous replica election in case of failures
+ type: boolean
+ type: object
+ unusablePVC:
+ description: List of all the PVCs that are unusable because another
+ PVC is missing
+ items:
+ type: string
+ type: array
+ writeService:
+ description: Current write pod
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: poolers.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: Pooler
+ listKind: PoolerList
+ plural: poolers
+ singular: pooler
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .spec.type
+ name: Type
+ type: string
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: Pooler is the Schema for the poolers API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the Pooler.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ cluster:
+ description: |-
+ This is the cluster reference on which the Pooler will work.
+ Pooler name should never match with any cluster name within the same namespace.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ deploymentStrategy:
+ description: The deployment strategy to use for pgbouncer to replace
+ existing pods with new ones
+ properties:
+ rollingUpdate:
+ description: |-
+ Rolling update config params. Present only if DeploymentStrategyType =
+ RollingUpdate.
+ ---
+ TODO: Update this to follow our convention for oneOf, whatever we decide it
+ to be.
+ properties:
+ maxSurge:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be scheduled above the desired number of
+ pods.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ This can not be 0 if MaxUnavailable is 0.
+ Absolute number is calculated from percentage by rounding up.
+ Defaults to 25%.
+ Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when
+ the rolling update starts, such that the total number of old and new pods do not exceed
+ 130% of desired pods. Once old pods have been killed,
+ new ReplicaSet can be scaled up further, ensuring that total number of pods running
+ at any time during the update is at most 130% of desired pods.
+ x-kubernetes-int-or-string: true
+ maxUnavailable:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ The maximum number of pods that can be unavailable during the update.
+ Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ Absolute number is calculated from percentage by rounding down.
+ This can not be 0 if MaxSurge is 0.
+ Defaults to 25%.
+ Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
+ immediately when the rolling update starts. Once new pods are ready, old ReplicaSet
+ can be scaled down further, followed by scaling up the new ReplicaSet, ensuring
+ that the total number of pods available at all times during the update is at
+ least 70% of desired pods.
+ x-kubernetes-int-or-string: true
+ type: object
+ type:
+ description: Type of deployment. Can be "Recreate" or "RollingUpdate".
+ Default is RollingUpdate.
+ type: string
+ type: object
+ instances:
+ default: 1
+ description: 'The number of replicas we want. Default: 1.'
+ format: int32
+ type: integer
+ monitoring:
+ description: The configuration of the monitoring infrastructure of
+ this pooler.
+ properties:
+ enablePodMonitor:
+ default: false
+ description: Enable or disable the `PodMonitor`
+ type: boolean
+ podMonitorMetricRelabelings:
+ description: The list of metric relabelings for the `PodMonitor`.
+ Applied to samples before ingestion.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ podMonitorRelabelings:
+ description: The list of relabelings for the `PodMonitor`. Applied
+ to samples before scraping.
+ items:
+ description: |-
+ RelabelConfig allows dynamic rewriting of the label set for targets, alerts,
+ scraped samples and remote write samples.
+
+
+ More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+ properties:
+ action:
+ default: replace
+ description: |-
+ Action to perform based on the regex matching.
+
+
+ `Uppercase` and `Lowercase` actions require Prometheus >= v2.36.0.
+ `DropEqual` and `KeepEqual` actions require Prometheus >= v2.41.0.
+
+
+ Default: "Replace"
+ enum:
+ - replace
+ - Replace
+ - keep
+ - Keep
+ - drop
+ - Drop
+ - hashmod
+ - HashMod
+ - labelmap
+ - LabelMap
+ - labeldrop
+ - LabelDrop
+ - labelkeep
+ - LabelKeep
+ - lowercase
+ - Lowercase
+ - uppercase
+ - Uppercase
+ - keepequal
+ - KeepEqual
+ - dropequal
+ - DropEqual
+ type: string
+ modulus:
+ description: |-
+ Modulus to take of the hash of the source label values.
+
+
+ Only applicable when the action is `HashMod`.
+ format: int64
+ type: integer
+ regex:
+ description: Regular expression against which the extracted
+ value is matched.
+ type: string
+ replacement:
+ description: |-
+ Replacement value against which a Replace action is performed if the
+ regular expression matches.
+
+
+ Regex capture groups are available.
+ type: string
+ separator:
+ description: Separator is the string between concatenated
+ SourceLabels.
+ type: string
+ sourceLabels:
+ description: |-
+ The source labels select values from existing labels. Their content is
+ concatenated using the configured Separator and matched against the
+ configured regular expression.
+ items:
+ description: |-
+ LabelName is a valid Prometheus label name which may only contain ASCII
+ letters, numbers, as well as underscores.
+ pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$
+ type: string
+ type: array
+ targetLabel:
+ description: |-
+ Label to which the resulting string is written in a replacement.
+
+
+ It is mandatory for `Replace`, `HashMod`, `Lowercase`, `Uppercase`,
+ `KeepEqual` and `DropEqual` actions.
+
+
+ Regex capture groups are available.
+ type: string
+ type: object
+ type: array
+ type: object
+ pgbouncer:
+ description: The PgBouncer configuration
+ properties:
+ authQuery:
+ description: |-
+ The query that will be used to download the hash of the password
+ of a certain user. Default: "SELECT usename, passwd FROM public.user_search($1)".
+ In case it is specified, also an AuthQuerySecret has to be specified and
+ no automatic CNPG Cluster integration will be triggered.
+ type: string
+ authQuerySecret:
+ description: |-
+ The credentials of the user that need to be used for the authentication
+ query. In case it is specified, also an AuthQuery
+ (e.g. "SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1")
+ has to be specified and no automatic CNPG Cluster integration will be triggered.
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ parameters:
+ additionalProperties:
+ type: string
+ description: |-
+ Additional parameters to be passed to PgBouncer - please check
+ the CNPG documentation for a list of options you can configure
+ type: object
+ paused:
+ default: false
+ description: |-
+ When set to `true`, PgBouncer will disconnect from the PostgreSQL
+ server, first waiting for all queries to complete, and pause all new
+ client connections until this value is set to `false` (default). Internally,
+ the operator calls PgBouncer's `PAUSE` and `RESUME` commands.
+ type: boolean
+ pg_hba:
+ description: |-
+ PostgreSQL Host Based Authentication rules (lines to be appended
+ to the pg_hba.conf file)
+ items:
+ type: string
+ type: array
+ poolMode:
+ default: session
+ description: 'The pool mode. Default: `session`.'
+ enum:
+ - session
+ - transaction
+ type: string
+ type: object
+ template:
+ description: The template of the Pod to be created
+ properties:
+ metadata:
+ description: |-
+ Standard object's metadata.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ properties:
+ annotations:
+ additionalProperties:
+ type: string
+ description: |-
+ Annotations is an unstructured key value map stored with a resource that may be
+ set by external tools to store and retrieve arbitrary metadata. They are not
+ queryable and should be preserved when modifying objects.
+ More info: http://kubernetes.io/docs/user-guide/annotations
+ type: object
+ labels:
+ additionalProperties:
+ type: string
+ description: |-
+ Map of string keys and values that can be used to organize and categorize
+ (scope and select) objects. May match selectors of replication controllers
+ and services.
+ More info: http://kubernetes.io/docs/user-guide/labels
+ type: object
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the pod.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ activeDeadlineSeconds:
+ description: |-
+ Optional duration in seconds the pod may be active on the node relative to
+ StartTime before the system will actively try to mark it failed and kill associated containers.
+ Value must be a positive integer.
+ format: int64
+ type: integer
+ affinity:
+ description: If specified, the pod's scheduling constraints
+ properties:
+ nodeAffinity:
+ description: Describes node affinity scheduling rules
+ for the pod.
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node matches the corresponding matchExpressions; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: |-
+ An empty preferred scheduling term matches all objects with implicit weight 0
+ (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+ properties:
+ preference:
+ description: A node selector term, associated
+ with the corresponding weight.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                        Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ weight:
+ description: Weight associated with matching
+ the corresponding nodeSelectorTerm, in the
+ range 1-100.
+ format: int32
+ type: integer
+ required:
+ - preference
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to an update), the system
+ may or may not try to eventually evict the pod from its node.
+ properties:
+ nodeSelectorTerms:
+ description: Required. A list of node selector
+ terms. The terms are ORed.
+ items:
+ description: |-
+ A null or empty node selector term matches no objects. The requirements of
+ them are ANDed.
+ The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+ properties:
+ matchExpressions:
+ description: A list of node selector requirements
+ by node's labels.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchFields:
+ description: A list of node selector requirements
+ by node's fields.
+ items:
+ description: |-
+ A node selector requirement is a selector that contains values, a key, and an operator
+ that relates the key and values.
+ properties:
+ key:
+ description: The label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ Represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ type: string
+ values:
+ description: |-
+ An array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. If the operator is Gt or Lt, the values
+ array must have a single element, which will be interpreted as an integer.
+ This array is replaced during a strategic merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ required:
+ - nodeSelectorTerms
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ podAffinity:
+ description: Describes pod affinity scheduling rules (e.g.
+ co-locate this pod in the same node, zone, etc. as some
+ other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                              the label with key &lt;topologyKey&gt; matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ podAntiAffinity:
+ description: Describes pod anti-affinity scheduling rules
+ (e.g. avoid putting this pod in the same node, zone,
+ etc. as some other pod(s)).
+ properties:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ The scheduler will prefer to schedule pods to nodes that satisfy
+ the anti-affinity expressions specified by this field, but it may choose
+ a node that violates one or more of the expressions. The node that is
+ most preferred is the one with the greatest sum of weights, i.e.
+ for each node that meets all of the scheduling requirements (resource
+ request, requiredDuringScheduling anti-affinity expressions, etc.),
+ compute a sum by iterating through the elements of this field and adding
+ "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ node(s) with the highest sum are the most preferred.
+ items:
+ description: The weights of all of the matched WeightedPodAffinityTerm
+ fields are added per-node to find the most preferred
+ node(s)
+ properties:
+ podAffinityTerm:
+ description: Required. A pod affinity term,
+ associated with the corresponding weight.
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ weight:
+ description: |-
+ weight associated with matching the corresponding podAffinityTerm,
+ in the range 1-100.
+ format: int32
+ type: integer
+ required:
+ - podAffinityTerm
+ - weight
+ type: object
+ type: array
+ requiredDuringSchedulingIgnoredDuringExecution:
+ description: |-
+ If the anti-affinity requirements specified by this field are not met at
+ scheduling time, the pod will not be scheduled onto the node.
+ If the anti-affinity requirements specified by this field cease to be met
+ at some point during pod execution (e.g. due to a pod label update), the
+ system may or may not try to eventually evict the pod from its node.
+ When there are multiple elements, the lists of nodes corresponding to each
+ podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ items:
+ description: |-
+ Defines a set of pods (namely those matching the labelSelector
+ relative to the given namespace(s)) that this pod should be
+ co-located (affinity) or not co-located (anti-affinity) with,
+ where co-located is defined as running on a node whose value of
+                              the label with key &lt;topologyKey&gt; matches that of any node on which
+ a pod of the set of pods is running
+ properties:
+ labelSelector:
+ description: |-
+ A label query over a set of resources, in this case pods.
+ If it's null, this PodAffinityTerm matches with no Pods.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ Also, MatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ mismatchLabelKeys:
+ description: |-
+ MismatchLabelKeys is a set of pod label keys to select which pods will
+ be taken into consideration. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)`
+ to select the group of existing pods which pods will be taken into consideration
+ for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming
+ pod labels will be ignored. The default value is empty.
+ The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector.
+ Also, MismatchLabelKeys cannot be set when LabelSelector isn't set.
+ This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ namespaceSelector:
+ description: |-
+ A label query over the set of namespaces that the term applies to.
+ The term is applied to the union of the namespaces selected by this field
+ and the ones listed in the namespaces field.
+ null selector and null or empty namespaces list means "this pod's namespace".
+ An empty selector ({}) matches all namespaces.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The requirements
+ are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key
+ that the selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ namespaces:
+ description: |-
+ namespaces specifies a static list of namespace names that the term applies to.
+ The term is applied to the union of the namespaces listed in this field
+ and the ones selected by namespaceSelector.
+ null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+ items:
+ type: string
+ type: array
+ topologyKey:
+ description: |-
+ This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ whose value of the label with key topologyKey matches that of any node on which any of the
+ selected pods is running.
+ Empty topologyKey is not allowed.
+ type: string
+ required:
+ - topologyKey
+ type: object
+ type: array
+ type: object
+ type: object
+ automountServiceAccountToken:
+ description: AutomountServiceAccountToken indicates whether
+ a service account token should be automatically mounted.
+ type: boolean
+ containers:
+ description: |-
+ List of containers belonging to the pod.
+ Containers cannot currently be added or removed.
+ There must be at least one container in a Pod.
+ Cannot be updated.
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for backward compatibility. There is no validation of this field, and
+ lifecycle hooks will fail at runtime when a TCP handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container processes that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ dnsConfig:
+ description: |-
+ Specifies the DNS parameters of a pod.
+ Parameters specified here will be merged to the generated DNS
+ configuration based on DNSPolicy.
+ properties:
+ nameservers:
+ description: |-
+ A list of DNS name server IP addresses.
+ This will be appended to the base nameservers generated from DNSPolicy.
+ Duplicated nameservers will be removed.
+ items:
+ type: string
+ type: array
+ options:
+ description: |-
+ A list of DNS resolver options.
+ This will be merged with the base options generated from DNSPolicy.
+ Duplicated entries will be removed. Resolution options given in Options
+ will override those that appear in the base DNSPolicy.
+ items:
+ description: PodDNSConfigOption defines DNS resolver
+ options of a pod.
+ properties:
+ name:
+ description: Required.
+ type: string
+ value:
+ type: string
+ type: object
+ type: array
+ searches:
+ description: |-
+ A list of DNS search domains for host-name lookup.
+ This will be appended to the base search paths generated from DNSPolicy.
+ Duplicated search paths will be removed.
+ items:
+ type: string
+ type: array
+ type: object
+ dnsPolicy:
+ description: |-
+ Set DNS policy for the pod.
+ Defaults to "ClusterFirst".
+ Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ To have DNS options set along with hostNetwork, you have to specify DNS policy
+ explicitly to 'ClusterFirstWithHostNet'.
+ type: string
+ enableServiceLinks:
+ description: |-
+ EnableServiceLinks indicates whether information about services should be injected into pod's
+ environment variables, matching the syntax of Docker links.
+ Optional: Defaults to true.
+ type: boolean
+ ephemeralContainers:
+ description: |-
+ List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ items:
+ description: |-
+ An EphemeralContainer is a temporary container that you may add to an existing Pod for
+ user-initiated activities such as debugging. Ephemeral containers have no resource or
+ scheduling guarantees, and they will not be restarted when they exit or when a Pod is
+ removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the
+ Pod to exceed its resource allocation.
+
+
+ To add an ephemeral container, use the ephemeralcontainers subresource of an existing
+ Pod. Ephemeral containers may not be removed or restarted.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: Lifecycle is not allowed for ephemeral
+ containers.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for the backward compatibility. There are no validation of this field and
+ lifecycle hooks will fail in runtime when tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for the backward compatibility. There are no validation of this field and
+ lifecycle hooks will fail in runtime when tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the ephemeral container specified as a DNS_LABEL.
+ This name must be unique among all containers, init containers and ephemeral containers.
+ type: string
+ ports:
+ description: Ports are not allowed for ephemeral containers.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources
+ already allocated to the pod.
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ Restart policy for the container to manage the restart behavior of each
+ container within a pod.
+ This may only be set for init containers. You cannot set this field on
+ ephemeral containers.
+ type: string
+ securityContext:
+ description: |-
+ Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: Probes are not allowed for ephemeral containers.
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container processes that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ targetContainerName:
+ description: |-
+ If set, the name of the container from PodSpec that this ephemeral container targets.
+ The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container.
+ If not set then the ephemeral container uses the namespaces configured in the Pod spec.
+
+
+ The container runtime must implement support for this feature. If the runtime does not
+ support namespace targeting then the result of setting this field is undefined.
+ type: string
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ hostAliases:
+ description: |-
+ HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
+ file if specified. This is only valid for non-hostNetwork pods.
+ items:
+ description: |-
+ HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
+ pod's hosts file.
+ properties:
+ hostnames:
+ description: Hostnames for the above IP address.
+ items:
+ type: string
+ type: array
+ ip:
+ description: IP address of the host file entry.
+ type: string
+ type: object
+ type: array
+ hostIPC:
+ description: |-
+ Use the host's ipc namespace.
+ Optional: Default to false.
+ type: boolean
+ hostNetwork:
+ description: |-
+ Host networking requested for this pod. Use the host's network namespace.
+ If this option is set, the ports that will be used must be specified.
+ Default to false.
+ type: boolean
+ hostPID:
+ description: |-
+ Use the host's pid namespace.
+ Optional: Default to false.
+ type: boolean
+ hostUsers:
+ description: |-
+ Use the host's user namespace.
+ Optional: Default to true.
+ If set to true or not present, the pod will be run in the host user namespace, useful
+ for when the pod needs a feature only available to the host user namespace, such as
+ loading a kernel module with CAP_SYS_MODULE.
+ When set to false, a new userns is created for the pod. Setting false is useful for
+ mitigating container breakout vulnerabilities even allowing users to run their
+ containers as root without actually having root privileges on the host.
+ This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.
+ type: boolean
+ hostname:
+ description: |-
+ Specifies the hostname of the Pod
+ If not specified, the pod's hostname will be set to a system-defined value.
+ type: string
+ imagePullSecrets:
+ description: |-
+ ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
+ If specified, these secrets will be passed to individual puller implementations for them to use.
+ More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod
+ items:
+ description: |-
+ LocalObjectReference contains enough information to let you locate the
+ referenced object inside the same namespace.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ type: array
+ initContainers:
+ description: |-
+ List of initialization containers belonging to the pod.
+ Init containers are executed in order prior to containers being started. If any
+ init container fails, the pod is considered to have failed and is handled according
+ to its restartPolicy. The name for an init container or normal container must be
+ unique among all containers.
+ Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ The resourceRequirements of an init container are taken into account during scheduling
+ by finding the highest request/limit for each resource type, and then using the max of
+ of that value or the sum of the normal containers. Limits are applied to init containers
+ in a similar fashion.
+ Init containers cannot currently be added or removed.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ items:
+ description: A single application container that you want
+ to run within a pod.
+ properties:
+ args:
+ description: |-
+ Arguments to the entrypoint.
+ The container image's CMD is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ command:
+ description: |-
+ Entrypoint array. Not executed within a shell.
+ The container image's ENTRYPOINT is used if this is not provided.
+ Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
+ cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ of whether the variable exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
+ items:
+ type: string
+ type: array
+ env:
+ description: |-
+ List of environment variables to set in the container.
+ Cannot be updated.
+ items:
+ description: EnvVar represents an environment variable
+ present in a Container.
+ properties:
+ name:
+ description: Name of the environment variable.
+ Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: |-
+ Variable references $(VAR_NAME) are expanded
+ using the previously defined environment variables in the container and
+ any service environment variables. If a variable cannot be resolved,
+ the reference in the input string will be unchanged. Double $$ are reduced
+ to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ Escaped references will never be expanded, regardless of whether the variable
+ exists or not.
+ Defaults to "".
+ type: string
+ valueFrom:
+ description: Source for the environment variable's
+ value. Cannot be used if value is not empty.
+ properties:
+ configMapKeyRef:
+ description: Selects a key of a ConfigMap.
+ properties:
+ key:
+ description: The key to select.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ fieldRef:
+ description: |-
+ Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`,
+ spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ secretKeyRef:
+ description: Selects a key of a secret in
+ the pod's namespace
+ properties:
+ key:
+ description: The key of the secret to
+ select from. Must be a valid secret
+ key.
+ type: string
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret
+ or its key must be defined
+ type: boolean
+ required:
+ - key
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ envFrom:
+ description: |-
+ List of sources to populate environment variables in the container.
+ The keys defined within a source must be a C_IDENTIFIER. All invalid keys
+ will be reported as an event when the container is starting. When a key exists in multiple
+ sources, the value associated with the last source will take precedence.
+ Values defined by an Env with a duplicate key will take precedence.
+ Cannot be updated.
+ items:
+ description: EnvFromSource represents the source of
+ a set of ConfigMaps
+ properties:
+ configMapRef:
+ description: The ConfigMap to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the ConfigMap
+ must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ prefix:
+ description: An optional identifier to prepend
+ to each key in the ConfigMap. Must be a C_IDENTIFIER.
+ type: string
+ secretRef:
+ description: The Secret to select from
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: Specify whether the Secret must
+ be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ type: object
+ type: array
+ image:
+ description: |-
+ Container image name.
+ More info: https://kubernetes.io/docs/concepts/containers/images
+ This field is optional to allow higher level config management to default or override
+ container images in workload controllers like Deployments and StatefulSets.
+ type: string
+ imagePullPolicy:
+ description: |-
+ Image pull policy.
+ One of Always, Never, IfNotPresent.
+ Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ type: string
+ lifecycle:
+ description: |-
+ Actions that the management system should take in response to container lifecycle events.
+ Cannot be updated.
+ properties:
+ postStart:
+ description: |-
+ PostStart is called immediately after a container is created. If the handler fails,
+ the container is terminated and restarted according to its restart policy.
+ Other management of the container blocks until the hook completes.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for the backward compatibility. There are no validation of this field and
+ lifecycle hooks will fail in runtime when tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ preStop:
+ description: |-
+ PreStop is called immediately before a container is terminated due to an
+ API request or management event such as liveness/startup probe failure,
+ preemption, resource contention, etc. The handler is not called if the
+ container crashes or exits. The Pod's termination grace period countdown begins before the
+ PreStop hook is executed. Regardless of the outcome of the handler, the
+ container will eventually terminate within the Pod's termination grace
+ period (unless delayed by finalizers). Other management of the container blocks until the hook completes
+ or until the termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the
+ request. HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP
+ server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ sleep:
+ description: Sleep represents the duration that
+ the container should sleep before being terminated.
+ properties:
+ seconds:
+ description: Seconds is the number of seconds
+ to sleep.
+ format: int64
+ type: integer
+ required:
+ - seconds
+ type: object
+ tcpSocket:
+ description: |-
+ Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept
+ for the backward compatibility. There are no validation of this field and
+ lifecycle hooks will fail in runtime when tcp handler is specified.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ type: object
+ type: object
+ livenessProbe:
+ description: |-
+ Periodic probe of container liveness.
+ Container will be restarted if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ name:
+ description: |-
+ Name of the container specified as a DNS_LABEL.
+ Each container in a pod must have a unique name (DNS_LABEL).
+ Cannot be updated.
+ type: string
+ ports:
+ description: |-
+ List of ports to expose from the container. Not specifying a port here
+ DOES NOT prevent that port from being exposed. Any port which is
+ listening on the default "0.0.0.0" address inside a container will be
+ accessible from the network.
+ Modifying this array with strategic merge patch may corrupt the data.
+ For more information See https://github.com/kubernetes/kubernetes/issues/108255.
+ Cannot be updated.
+ items:
+ description: ContainerPort represents a network port
+ in a single container.
+ properties:
+ containerPort:
+ description: |-
+ Number of port to expose on the pod's IP address.
+ This must be a valid port number, 0 < x < 65536.
+ format: int32
+ type: integer
+ hostIP:
+ description: What host IP to bind the external
+ port to.
+ type: string
+ hostPort:
+ description: |-
+ Number of port to expose on the host.
+ If specified, this must be a valid port number, 0 < x < 65536.
+ If HostNetwork is specified, this must match ContainerPort.
+ Most containers do not need this.
+ format: int32
+ type: integer
+ name:
+ description: |-
+ If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
+ named port in a pod must have a unique name. Name for the port that can be
+ referred to by services.
+ type: string
+ protocol:
+ default: TCP
+ description: |-
+ Protocol for port. Must be UDP, TCP, or SCTP.
+ Defaults to "TCP".
+ type: string
+ required:
+ - containerPort
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - containerPort
+ - protocol
+ x-kubernetes-list-type: map
+ readinessProbe:
+ description: |-
+ Periodic probe of container service readiness.
+ Container will be removed from service endpoints if the probe fails.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ resizePolicy:
+ description: Resources resize policy for the container.
+ items:
+ description: ContainerResizePolicy represents resource
+ resize policy for the container.
+ properties:
+ resourceName:
+ description: |-
+ Name of the resource to which this resource resize policy applies.
+ Supported values: cpu, memory.
+ type: string
+ restartPolicy:
+ description: |-
+ Restart policy to apply when specified resource is resized.
+ If not specified, it defaults to NotRequired.
+ type: string
+ required:
+ - resourceName
+ - restartPolicy
+ type: object
+ type: array
+ x-kubernetes-list-type: atomic
+ resources:
+ description: |-
+ Compute Resources required by this container.
+ Cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ properties:
+ claims:
+ description: |-
+ Claims lists the names of resources, defined in spec.resourceClaims,
+ that are used by this container.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable. It can only be set for containers.
+ items:
+ description: ResourceClaim references one entry
+ in PodSpec.ResourceClaims.
+ properties:
+ name:
+ description: |-
+ Name must match the name of one entry in pod.spec.resourceClaims of
+ the Pod where this field is used. It makes that resource available
+ inside a container.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ restartPolicy:
+ description: |-
+ RestartPolicy defines the restart behavior of individual containers in a pod.
+ This field may only be set for init containers, and the only allowed value is "Always".
+ For non-init containers or when this field is not specified,
+ the restart behavior is defined by the Pod's restart policy and the container type.
+ Setting the RestartPolicy as "Always" for the init container will have the following effect:
+ this init container will be continually restarted on
+ exit until all regular containers have terminated. Once all regular
+ containers have completed, all init containers with restartPolicy "Always"
+ will be shut down. This lifecycle differs from normal init containers and
+ is often referred to as a "sidecar" container. Although this init
+ container still starts in the init container sequence, it does not wait
+ for the container to complete before proceeding to the next init
+ container. Instead, the next init container starts immediately after this
+ init container is started, or after any startupProbe has successfully
+ completed.
+ type: string
+ securityContext:
+ description: |-
+ SecurityContext defines the security options the container should be run with.
+ If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+ properties:
+ allowPrivilegeEscalation:
+ description: |-
+ AllowPrivilegeEscalation controls whether a process can gain more
+ privileges than its parent process. This bool directly controls if
+ the no_new_privs flag will be set on the container process.
+ AllowPrivilegeEscalation is true always when the container is:
+ 1) run as Privileged
+ 2) has CAP_SYS_ADMIN
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ capabilities:
+ description: |-
+ The capabilities to add/drop when running containers.
+ Defaults to the default set of capabilities granted by the container runtime.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ add:
+ description: Added capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ drop:
+ description: Removed capabilities
+ items:
+ description: Capability represent POSIX capabilities
+ type
+ type: string
+ type: array
+ type: object
+ privileged:
+ description: |-
+ Run container in privileged mode.
+ Processes in privileged containers are essentially equivalent to root on the host.
+ Defaults to false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ procMount:
+ description: |-
+ procMount denotes the type of proc mount to use for the containers.
+ The default is DefaultProcMount which uses the container runtime defaults for
+ readonly paths and masked paths.
+ This requires the ProcMountType feature flag to be enabled.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ readOnlyRootFilesystem:
+ description: |-
+ Whether this container has a read-only root filesystem.
+ Default is false.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: boolean
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to the container.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that
+ applies to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that
+ applies to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that
+ applies to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that
+ applies to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by this container. If seccomp options are
+ provided at both the pod & container level, the container options
+ override the pod options.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options from the PodSecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name
+ of the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ startupProbe:
+ description: |-
+ StartupProbe indicates that the Pod has successfully initialized.
+ If specified, no other probes are executed until this completes successfully.
+ If this probe fails, the Pod will be restarted, just as if the livenessProbe failed.
+ This can be used to provide different probe parameters at the beginning of a Pod's lifecycle,
+ when it might take a long time to load data or warm a cache, than during steady-state operation.
+ This cannot be updated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ properties:
+ exec:
+ description: Exec specifies the action to take.
+ properties:
+ command:
+ description: |-
+ Command is the command line to execute inside the container, the working directory for the
+ command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ a shell, you need to explicitly call out to that shell.
+ Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ items:
+ type: string
+ type: array
+ type: object
+ failureThreshold:
+ description: |-
+ Minimum consecutive failures for the probe to be considered failed after having succeeded.
+ Defaults to 3. Minimum value is 1.
+ format: int32
+ type: integer
+ grpc:
+ description: GRPC specifies an action involving
+ a GRPC port.
+ properties:
+ port:
+ description: Port number of the gRPC service.
+ Number must be in the range 1 to 65535.
+ format: int32
+ type: integer
+ service:
+ description: |-
+ Service is the name of the service to place in the gRPC HealthCheckRequest
+ (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).
+
+
+ If this is not specified, the default behavior is defined by gRPC.
+ type: string
+ required:
+ - port
+ type: object
+ httpGet:
+ description: HTTPGet specifies the http request
+ to perform.
+ properties:
+ host:
+ description: |-
+ Host name to connect to, defaults to the pod IP. You probably want to set
+ "Host" in httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers to set in the request.
+ HTTP allows repeated headers.
+ items:
+ description: HTTPHeader describes a custom
+ header to be used in HTTP probes
+ properties:
+ name:
+ description: |-
+ The header field name.
+ This will be canonicalized upon output, so case-variant names will be understood as the same header.
+ type: string
+ value:
+ description: The header field value
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ path:
+ description: Path to access on the HTTP server.
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Name or number of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: |-
+ Scheme to use for connecting to the host.
+ Defaults to HTTP.
+ type: string
+ required:
+ - port
+ type: object
+ initialDelaySeconds:
+ description: |-
+ Number of seconds after the container has started before liveness probes are initiated.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ periodSeconds:
+ description: |-
+ How often (in seconds) to perform the probe.
+ Default to 10 seconds. Minimum value is 1.
+ format: int32
+ type: integer
+ successThreshold:
+ description: |-
+ Minimum consecutive successes for the probe to be considered successful after having failed.
+ Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
+ format: int32
+ type: integer
+ tcpSocket:
+ description: TCPSocket specifies an action involving
+ a TCP port.
+ properties:
+ host:
+ description: 'Optional: Host name to connect
+ to, defaults to the pod IP.'
+ type: string
+ port:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ Number or name of the port to access on the container.
+ Number must be in the range 1 to 65535.
+ Name must be an IANA_SVC_NAME.
+ x-kubernetes-int-or-string: true
+ required:
+ - port
+ type: object
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully upon probe failure.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this
+ value overrides the value provided by the pod spec.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
+ format: int64
+ type: integer
+ timeoutSeconds:
+ description: |-
+ Number of seconds after which the probe times out.
+ Defaults to 1 second. Minimum value is 1.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ format: int32
+ type: integer
+ type: object
+ stdin:
+ description: |-
+ Whether this container should allocate a buffer for stdin in the container runtime. If this
+ is not set, reads from stdin in the container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: |-
+ Whether the container runtime should close the stdin channel after it has been opened by
+ a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ at which time stdin is closed and remains closed until the container is restarted. If this
+ flag is false, a container process that reads from stdin will never receive an EOF.
+ Default is false
+ type: boolean
+ terminationMessagePath:
+ description: |-
+ Optional: Path at which the file to which the container's termination message
+ will be written is mounted into the container's filesystem.
+ Message written is intended to be brief final status, such as an assertion failure message.
+ Will be truncated by the node if greater than 4096 bytes. The total message length across
+ all containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.
+ type: string
+ terminationMessagePolicy:
+ description: |-
+ Indicate how the termination message should be populated. File will use the contents of
+ terminationMessagePath to populate the container status message on both success and failure.
+ FallbackToLogsOnError will use the last chunk of container log output if the termination
+ message file is empty and the container exited with an error.
+ The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ Defaults to File.
+ Cannot be updated.
+ type: string
+ tty:
+ description: |-
+ Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list of block devices
+ to be used by the container.
+ items:
+ description: volumeDevice describes a mapping of a
+ raw block device within a container.
+ properties:
+ devicePath:
+ description: devicePath is the path inside of
+ the container that the device will be mapped
+ to.
+ type: string
+ name:
+ description: name must match the name of a persistentVolumeClaim
+ in the pod
+ type: string
+ required:
+ - devicePath
+ - name
+ type: object
+ type: array
+ volumeMounts:
+ description: |-
+ Pod volumes to mount into the container's filesystem.
+ Cannot be updated.
+ items:
+ description: VolumeMount describes a mounting of a
+ Volume within a container.
+ properties:
+ mountPath:
+ description: |-
+ Path within the container at which the volume should be mounted. Must
+ not contain ':'.
+ type: string
+ mountPropagation:
+ description: |-
+ mountPropagation determines how mounts are propagated from the host
+ to container and the other way around.
+ When not set, MountPropagationNone is used.
+ This field is beta in 1.10.
+ type: string
+ name:
+ description: This must match the Name of a Volume.
+ type: string
+ readOnly:
+ description: |-
+ Mounted read-only if true, read-write otherwise (false or unspecified).
+ Defaults to false.
+ type: boolean
+ subPath:
+ description: |-
+ Path within the volume from which the container's volume should be mounted.
+ Defaults to "" (volume's root).
+ type: string
+ subPathExpr:
+ description: |-
+ Expanded path within the volume from which the container's volume should be mounted.
+ Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+ Defaults to "" (volume's root).
+ SubPathExpr and SubPath are mutually exclusive.
+ type: string
+ required:
+ - mountPath
+ - name
+ type: object
+ type: array
+ workingDir:
+ description: |-
+ Container's working directory.
+ If not specified, the container runtime's default will be used, which
+ might be configured in the container image.
+ Cannot be updated.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ nodeName:
+ description: |-
+ NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ requirements.
+ type: string
+ nodeSelector:
+ additionalProperties:
+ type: string
+ description: |-
+ NodeSelector is a selector which must be true for the pod to fit on a node.
+ Selector which must match a node's labels for the pod to be scheduled on that node.
+ More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ type: object
+ x-kubernetes-map-type: atomic
+ os:
+ description: |-
+ Specifies the OS of the containers in the pod.
+ Some pod and container fields are restricted if this is set.
+
+
+ If the OS field is set to linux, the following fields must be unset:
+ - securityContext.windowsOptions
+
+
+ If the OS field is set to windows, following fields must be unset:
+ - spec.hostPID
+ - spec.hostIPC
+ - spec.hostUsers
+ - spec.securityContext.seLinuxOptions
+ - spec.securityContext.seccompProfile
+ - spec.securityContext.fsGroup
+ - spec.securityContext.fsGroupChangePolicy
+ - spec.securityContext.sysctls
+ - spec.shareProcessNamespace
+ - spec.securityContext.runAsUser
+ - spec.securityContext.runAsGroup
+ - spec.securityContext.supplementalGroups
+ - spec.containers[*].securityContext.seLinuxOptions
+ - spec.containers[*].securityContext.seccompProfile
+ - spec.containers[*].securityContext.capabilities
+ - spec.containers[*].securityContext.readOnlyRootFilesystem
+ - spec.containers[*].securityContext.privileged
+ - spec.containers[*].securityContext.allowPrivilegeEscalation
+ - spec.containers[*].securityContext.procMount
+ - spec.containers[*].securityContext.runAsUser
+ - spec.containers[*].securityContext.runAsGroup
+ properties:
+ name:
+ description: |-
+ Name is the name of the operating system. The currently supported values are linux and windows.
+ Additional value may be defined in future and can be one of:
+ https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration
+ Clients should expect to handle additional values and treat unrecognized values in this field as os: null
+ type: string
+ required:
+ - name
+ type: object
+ overhead:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ type: object
+ preemptionPolicy:
+ description: |-
+ PreemptionPolicy is the Policy for preempting pods with lower priority.
+ One of Never, PreemptLowerPriority.
+ Defaults to PreemptLowerPriority if unset.
+ type: string
+ priority:
+ description: |-
+ The priority value. Various system components use this field to find the
+ priority of the pod. When Priority Admission Controller is enabled, it
+ prevents users from setting this field. The admission controller populates
+ this field from PriorityClassName.
+ The higher the value, the higher the priority.
+ format: int32
+ type: integer
+ priorityClassName:
+ description: |-
+ If specified, indicates the pod's priority. "system-node-critical" and
+ "system-cluster-critical" are two special keywords which indicate the
+ highest priorities with the former being the highest priority. Any other
+ name must be defined by creating a PriorityClass object with that name.
+ If not specified, the pod priority will be default or zero if there is no
+ default.
+ type: string
+ readinessGates:
+ description: |-
+ If specified, all readiness gates will be evaluated for pod readiness.
+ A pod is ready when all its containers are ready AND
+ all conditions specified in the readiness gates have status equal to "True"
+ More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
+ items:
+ description: PodReadinessGate contains the reference to
+ a pod condition
+ properties:
+ conditionType:
+ description: ConditionType refers to a condition in
+ the pod's condition list with matching type.
+ type: string
+ required:
+ - conditionType
+ type: object
+ type: array
+ resourceClaims:
+ description: |-
+ ResourceClaims defines which ResourceClaims must be allocated
+ and reserved before the Pod is allowed to start. The resources
+ will be made available to those containers which consume them
+ by name.
+
+
+ This is an alpha field and requires enabling the
+ DynamicResourceAllocation feature gate.
+
+
+ This field is immutable.
+ items:
+ description: |-
+ PodResourceClaim references exactly one ResourceClaim through a ClaimSource.
+ It adds a name to it that uniquely identifies the ResourceClaim inside the Pod.
+ Containers that need access to the ResourceClaim reference it with this name.
+ properties:
+ name:
+ description: |-
+ Name uniquely identifies this resource claim inside the pod.
+ This must be a DNS_LABEL.
+ type: string
+ source:
+ description: Source describes where to find the ResourceClaim.
+ properties:
+ resourceClaimName:
+ description: |-
+ ResourceClaimName is the name of a ResourceClaim object in the same
+ namespace as this pod.
+ type: string
+ resourceClaimTemplateName:
+ description: |-
+ ResourceClaimTemplateName is the name of a ResourceClaimTemplate
+ object in the same namespace as this pod.
+
+
+ The template will be used to create a new ResourceClaim, which will
+ be bound to this pod. When this pod is deleted, the ResourceClaim
+ will also be deleted. The pod name and resource name, along with a
+ generated component, will be used to form a unique name for the
+ ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.
+
+
+ This field is immutable and no changes will be made to the
+ corresponding ResourceClaim by the control plane after creating the
+ ResourceClaim.
+ type: string
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ restartPolicy:
+ description: |-
+ Restart policy for all containers within the pod.
+ One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted.
+ Default to Always.
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ type: string
+ runtimeClassName:
+ description: |-
+ RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ empty definition that uses the default runtime handler.
+ More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
+ type: string
+ schedulerName:
+ description: |-
+ If specified, the pod will be dispatched by specified scheduler.
+ If not specified, the pod will be dispatched by default scheduler.
+ type: string
+ schedulingGates:
+ description: |-
+ SchedulingGates is an opaque list of values that if specified will block scheduling the pod.
+ If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the
+ scheduler will not attempt to schedule the pod.
+
+
+ SchedulingGates can only be set at pod creation time, and be removed only afterwards.
+
+
+ This is a beta feature enabled by the PodSchedulingReadiness feature gate.
+ items:
+ description: PodSchedulingGate is associated to a Pod to
+ guard its scheduling.
+ properties:
+ name:
+ description: |-
+ Name of the scheduling gate.
+ Each scheduling gate must have a unique name field.
+ type: string
+ required:
+ - name
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - name
+ x-kubernetes-list-type: map
+ securityContext:
+ description: |-
+ SecurityContext holds pod-level security attributes and common container settings.
+ Optional: Defaults to empty. See type description for default values of each field.
+ properties:
+ fsGroup:
+ description: |-
+ A special supplemental group that applies to all containers in a pod.
+ Some volume types allow the Kubelet to change the ownership of that volume
+ to be owned by the pod:
+
+
+ 1. The owning GID will be the FSGroup
+ 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
+ 3. The permission bits are OR'd with rw-rw----
+
+
+ If unset, the Kubelet will not modify the ownership and permissions of any volume.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ fsGroupChangePolicy:
+ description: |-
+ fsGroupChangePolicy defines behavior of changing ownership and permission of the volume
+ before being exposed inside Pod. This field will only apply to
+ volume types which support fsGroup based ownership(and permissions).
+ It will have no effect on ephemeral volume types such as: secret, configmaps
+ and emptydir.
+ Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used.
+ Note that this field cannot be set when spec.os.name is windows.
+ type: string
+ runAsGroup:
+ description: |-
+ The GID to run the entrypoint of the container process.
+ Uses runtime default if unset.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ runAsNonRoot:
+ description: |-
+ Indicates that the container must run as a non-root user.
+ If true, the Kubelet will validate the image at runtime to ensure that it
+ does not run as UID 0 (root) and fail to start the container if it does.
+ If unset or false, no such validation will be performed.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: boolean
+ runAsUser:
+ description: |-
+ The UID to run the entrypoint of the container process.
+ Defaults to user specified in image metadata if unspecified.
+ May also be set in SecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence
+ for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ format: int64
+ type: integer
+ seLinuxOptions:
+ description: |-
+ The SELinux context to be applied to all containers.
+ If unspecified, the container runtime will allocate a random SELinux context for each
+ container. May also be set in SecurityContext. If set in
+ both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ takes precedence for that container.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ level:
+ description: Level is SELinux level label that applies
+ to the container.
+ type: string
+ role:
+ description: Role is a SELinux role label that applies
+ to the container.
+ type: string
+ type:
+ description: Type is a SELinux type label that applies
+ to the container.
+ type: string
+ user:
+ description: User is a SELinux user label that applies
+ to the container.
+ type: string
+ type: object
+ seccompProfile:
+ description: |-
+ The seccomp options to use by the containers in this pod.
+ Note that this field cannot be set when spec.os.name is windows.
+ properties:
+ localhostProfile:
+ description: |-
+ localhostProfile indicates a profile defined in a file on the node should be used.
+ The profile must be preconfigured on the node to work.
+ Must be a descending path, relative to the kubelet's configured seccomp profile location.
+ Must be set if type is "Localhost". Must NOT be set for any other type.
+ type: string
+ type:
+ description: |-
+ type indicates which kind of seccomp profile will be applied.
+ Valid options are:
+
+
+ Localhost - a profile defined in a file on the node should be used.
+ RuntimeDefault - the container runtime default profile should be used.
+ Unconfined - no profile should be applied.
+ type: string
+ required:
+ - type
+ type: object
+ supplementalGroups:
+ description: |-
+ A list of groups applied to the first process run in each container, in addition
+ to the container's primary GID, the fsGroup (if specified), and group memberships
+ defined in the container image for the uid of the container process. If unspecified,
+ no additional groups are added to any container. Note that group memberships
+ defined in the container image for the uid of the container process are still effective,
+ even if they are not included in this list.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ format: int64
+ type: integer
+ type: array
+ sysctls:
+ description: |-
+ Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported
+ sysctls (by the container runtime) might fail to launch.
+ Note that this field cannot be set when spec.os.name is windows.
+ items:
+ description: Sysctl defines a kernel parameter to be
+ set
+ properties:
+ name:
+ description: Name of a property to set
+ type: string
+ value:
+ description: Value of a property to set
+ type: string
+ required:
+ - name
+ - value
+ type: object
+ type: array
+ windowsOptions:
+ description: |-
+ The Windows specific settings applied to all containers.
+ If unspecified, the options within a container's SecurityContext will be used.
+ If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ Note that this field cannot be set when spec.os.name is linux.
+ properties:
+ gmsaCredentialSpec:
+ description: |-
+ GMSACredentialSpec is where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the
+ GMSA credential spec named by the GMSACredentialSpecName field.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName is the name of
+ the GMSA credential spec to use.
+ type: string
+ hostProcess:
+ description: |-
+ HostProcess determines if a container should be run as a 'Host Process' container.
+ All of a Pod's containers must have the same effective HostProcess value
+ (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers).
+ In addition, if HostProcess is true then HostNetwork must also be set to true.
+ type: boolean
+ runAsUserName:
+ description: |-
+ The UserName in Windows to run the entrypoint of the container process.
+ Defaults to the user specified in image metadata if unspecified.
+ May also be set in PodSecurityContext. If set in both SecurityContext and
+ PodSecurityContext, the value specified in SecurityContext takes precedence.
+ type: string
+ type: object
+ type: object
+ serviceAccount:
+ description: |-
+ DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.
+ type: string
+ serviceAccountName:
+ description: |-
+ ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ type: string
+ setHostnameAsFQDN:
+ description: |-
+ If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default).
+ In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname).
+ In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN.
+ If a pod does not have FQDN, this has no effect.
+ Default to false.
+ type: boolean
+ shareProcessNamespace:
+ description: |-
+ Share a single process namespace between all of the containers in a pod.
+ When this is set containers will be able to view and signal processes from other containers
+ in the same pod, and the first process in each container will not be assigned PID 1.
+ HostPID and ShareProcessNamespace cannot both be set.
+ Optional: Default to false.
+ type: boolean
+ subdomain:
+ description: |-
+ If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
+ If not specified, the pod will not have a domainname at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: |-
+ Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ Value must be non-negative integer. The value zero indicates stop immediately via
+ the kill signal (no opportunity to shut down).
+ If this value is nil, the default grace period will be used instead.
+ The grace period is the duration in seconds after the processes running in the pod are sent
+ a termination signal and the time when the processes are forcibly halted with a kill signal.
+ Set this value longer than the expected cleanup time for your process.
+ Defaults to 30 seconds.
+ format: int64
+ type: integer
+ tolerations:
+ description: If specified, the pod's tolerations.
+ items:
+ description: |-
+ The pod this Toleration is attached to tolerates any taint that matches
+ the triple <key,value,effect> using the matching operator <operator>.
+ properties:
+ effect:
+ description: |-
+ Effect indicates the taint effect to match. Empty means match all taint effects.
+ When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ type: string
+ key:
+ description: |-
+ Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ type: string
+ operator:
+ description: |-
+ Operator represents a key's relationship to the value.
+ Valid operators are Exists and Equal. Defaults to Equal.
+ Exists is equivalent to wildcard for value, so that a pod can
+ tolerate all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: |-
+ TolerationSeconds represents the period of time the toleration (which must be
+ of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ it is not set, which means tolerate the taint forever (do not evict). Zero and
+ negative values will be treated as 0 (evict immediately) by the system.
+ format: int64
+ type: integer
+ value:
+ description: |-
+ Value is the taint value the toleration matches to.
+ If the operator is Exists, the value should be empty, otherwise just a regular string.
+ type: string
+ type: object
+ type: array
+ topologySpreadConstraints:
+ description: |-
+ TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ domains. Scheduler will schedule pods in a way which abides by the constraints.
+ All topologySpreadConstraints are ANDed.
+ items:
+ description: TopologySpreadConstraint specifies how to spread
+ matching pods among the given topology.
+ properties:
+ labelSelector:
+ description: |-
+ LabelSelector is used to find matching pods.
+ Pods that match this label selector are counted to determine the number of pods
+ in their corresponding topology domain.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label
+ selector requirements. The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label key that the
+ selector applies to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ matchLabelKeys:
+ description: |-
+ MatchLabelKeys is a set of pod label keys to select the pods over which
+ spreading will be calculated. The keys are used to lookup values from the
+ incoming pod labels, those key-value labels are ANDed with labelSelector
+ to select the group of existing pods over which spreading will be calculated
+ for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+ MatchLabelKeys cannot be set when LabelSelector isn't set.
+ Keys that don't exist in the incoming pod labels will
+ be ignored. A null or empty list means only match against labelSelector.
+
+
+ This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).
+ items:
+ type: string
+ type: array
+ x-kubernetes-list-type: atomic
+ maxSkew:
+ description: |-
+ MaxSkew describes the degree to which pods may be unevenly distributed.
+ When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference
+ between the number of matching pods in the target topology and the global minimum.
+ The global minimum is the minimum number of matching pods in an eligible domain
+ or zero if the number of eligible domains is less than MinDomains.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 2/2/1:
+ In this case, the global minimum is 1.
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P |
+ - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+ scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+ violate MaxSkew(1).
+ - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+ When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence
+ to topologies that satisfy it.
+ It's a required field. Default value is 1 and 0 is not allowed.
+ format: int32
+ type: integer
+ minDomains:
+ description: |-
+ MinDomains indicates a minimum number of eligible domains.
+ When the number of eligible domains with matching topology keys is less than minDomains,
+ Pod Topology Spread treats "global minimum" as 0, and then the calculation of Skew is performed.
+ And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+ this value has no effect on scheduling.
+ As a result, when the number of eligible domains is less than minDomains,
+ scheduler won't schedule more than maxSkew Pods to those domains.
+ If value is nil, the constraint behaves as if MinDomains is equal to 1.
+ Valid values are integers greater than 0.
+ When value is not nil, WhenUnsatisfiable must be DoNotSchedule.
+
+
+ For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+ labelSelector spread as 2/2/2:
+ | zone1 | zone2 | zone3 |
+ | P P | P P | P P |
+ The number of domains is less than 5(MinDomains), so "global minimum" is treated as 0.
+ In this situation, new pod with the same labelSelector cannot be scheduled,
+ because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+ it will violate MaxSkew.
+
+
+ This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).
+ format: int32
+ type: integer
+ nodeAffinityPolicy:
+ description: |-
+ NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector
+ when calculating pod topology spread skew. Options are:
+ - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+ - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.
+
+
+ If this value is nil, the behavior is equivalent to the Honor policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ nodeTaintsPolicy:
+ description: |-
+ NodeTaintsPolicy indicates how we will treat node taints when calculating
+ pod topology spread skew. Options are:
+ - Honor: nodes without taints, along with tainted nodes for which the incoming pod
+ has a toleration, are included.
+ - Ignore: node taints are ignored. All nodes are included.
+
+
+ If this value is nil, the behavior is equivalent to the Ignore policy.
+ This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.
+ type: string
+ topologyKey:
+ description: |-
+ TopologyKey is the key of node labels. Nodes that have a label with this key
+ and identical values are considered to be in the same topology.
+ We consider each <key, value> as a "bucket", and try to put balanced number
+ of pods into each bucket.
+ We define a domain as a particular instance of a topology.
+ Also, we define an eligible domain as a domain whose nodes meet the requirements of
+ nodeAffinityPolicy and nodeTaintsPolicy.
+ e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology.
+ And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology.
+ It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: |-
+ WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+ the spread constraint.
+ - DoNotSchedule (default) tells the scheduler not to schedule it.
+ - ScheduleAnyway tells the scheduler to schedule the pod in any location,
+ but giving higher precedence to topologies that would help reduce the
+ skew.
+ A constraint is considered "Unsatisfiable" for an incoming pod
+ if and only if every possible node assignment for that pod would violate
+ "MaxSkew" on some topology.
+ For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+ labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 |
+ | P P P | P | P |
+ If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+ to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+ MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+ won't make it *more* imbalanced.
+ It's a required field.
+ type: string
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ type: object
+ type: array
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: |-
+ List of volumes that can be mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes
+ items:
+ description: Volume represents a named volume in a pod that
+ may be accessed by any container in the pod.
+ properties:
+ awsElasticBlockStore:
+ description: |-
+ awsElasticBlockStore represents an AWS Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly value true will force the readOnly setting in VolumeMounts.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: boolean
+ volumeID:
+ description: |-
+ volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ type: string
+ required:
+ - volumeID
+ type: object
+ azureDisk:
+ description: azureDisk represents an Azure Data Disk
+ mount on the host and bind mount to the pod.
+ properties:
+ cachingMode:
+ description: 'cachingMode is the Host Caching mode:
+ None, Read Only, Read Write.'
+ type: string
+ diskName:
+ description: diskName is the Name of the data disk
+ in the blob storage
+ type: string
+ diskURI:
+ description: diskURI is the URI of data disk in
+ the blob storage
+ type: string
+ fsType:
+ description: |-
+ fsType is Filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ kind:
+ description: 'kind expected values are Shared: multiple
+ blob disks per storage account Dedicated: single
+ blob disk per storage account Managed: azure
+ managed data disk (only in managed availability
+ set). defaults to shared'
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ required:
+ - diskName
+ - diskURI
+ type: object
+ azureFile:
+ description: azureFile represents an Azure File Service
+ mount on the host and bind mount to the pod.
+ properties:
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretName:
+ description: secretName is the name of secret that
+ contains Azure Storage Account Name and Key
+ type: string
+ shareName:
+ description: shareName is the azure share Name
+ type: string
+ required:
+ - secretName
+ - shareName
+ type: object
+ cephfs:
+ description: cephFS represents a Ceph FS mount on the
+ host that shares a pod's lifetime
+ properties:
+ monitors:
+ description: |-
+ monitors is Required: Monitors is a collection of Ceph monitors
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ path:
+ description: 'path is Optional: Used as the mounted
+ root, rather than the full Ceph tree, default
+ is /'
+ type: string
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: boolean
+ secretFile:
+ description: |-
+ secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ secretRef:
+ description: |-
+ secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is optional: User is the rados user name, default is admin
+ More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ type: string
+ required:
+ - monitors
+ type: object
+ cinder:
+ description: |-
+ cinder represents a cinder volume attached and mounted on kubelets host machine.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is optional: points to a secret object containing parameters used to connect
+ to OpenStack.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeID:
+ description: |-
+ volumeID used to identify the volume in cinder.
+ More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+ type: string
+ required:
+ - volumeID
+ type: object
+ configMap:
+ description: configMap represents a configMap that should
+ populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether the ConfigMap
+ or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ csi:
+ description: csi (Container Storage Interface) represents
+ ephemeral storage that is handled by certain external
+ CSI drivers (Beta feature).
+ properties:
+ driver:
+ description: |-
+ driver is the name of the CSI driver that handles this volume.
+ Consult with your admin for the correct name as registered in the cluster.
+ type: string
+ fsType:
+ description: |-
+ fsType to mount. Ex. "ext4", "xfs", "ntfs".
+ If not provided, the empty value is passed to the associated CSI driver
+ which will determine the default filesystem to apply.
+ type: string
+ nodePublishSecretRef:
+ description: |-
+ nodePublishSecretRef is a reference to the secret object containing
+ sensitive information to pass to the CSI driver to complete the CSI
+ NodePublishVolume and NodeUnpublishVolume calls.
+ This field is optional, and may be empty if no secret is required. If the
+ secret object contains more than one secret, all secret references are passed.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ readOnly:
+ description: |-
+ readOnly specifies a read-only configuration for the volume.
+ Defaults to false (read/write).
+ type: boolean
+ volumeAttributes:
+ additionalProperties:
+ type: string
+ description: |-
+ volumeAttributes stores driver-specific properties that are passed to the CSI
+ driver. Consult your driver's documentation for supported values.
+ type: object
+ required:
+ - driver
+ type: object
+ downwardAPI:
+ description: downwardAPI represents downward API about
+ the pod that should populate this volume
+ properties:
+ defaultMode:
+ description: |-
+ Optional: mode bits used to set permissions on
+ created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: Items is a list of downward API volume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing the
+ pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects a field of
+ the pod: only annotations, labels, name
+ and namespace are supported.'
+ properties:
+ apiVersion:
+ description: Version of the schema the
+ FieldPath is written in terms of, defaults
+ to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field to select
+ in the specified API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the relative
+ path name of the file to be created. Must
+ not be absolute or contain the ''..'' path.
+ Must be utf-8 encoded. The first item of
+ the relative path must not start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name: required
+ for volumes, optional for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output format
+ of the exposed resources, defaults to
+ "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ emptyDir:
+ description: |-
+ emptyDir represents a temporary directory that shares a pod's lifetime.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ properties:
+ medium:
+ description: |-
+ medium represents what type of storage medium should back this directory.
+ The default is "" which means to use the node's default medium.
+ Must be an empty string (default) or Memory.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ type: string
+ sizeLimit:
+ anyOf:
+ - type: integer
+ - type: string
+ description: |-
+ sizeLimit is the total amount of local storage required for this EmptyDir volume.
+ The size limit is also applicable for memory medium.
+ The maximum usage on memory medium EmptyDir would be the minimum value between
+ the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+ The default is nil which means that the limit is undefined.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ type: object
+ ephemeral:
+ description: |-
+ ephemeral represents a volume that is handled by a cluster storage driver.
+ The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+ and deleted when the pod is removed.
+
+
+ Use this if:
+ a) the volume is only needed while the pod runs,
+ b) features of normal volumes like restoring from snapshot or capacity
+ tracking are needed,
+ c) the storage driver is specified through a storage class, and
+ d) the storage driver supports dynamic volume provisioning through
+ a PersistentVolumeClaim (see EphemeralVolumeSource for more
+ information on the connection between this volume type
+ and PersistentVolumeClaim).
+
+
+ Use PersistentVolumeClaim or one of the vendor-specific
+ APIs for volumes that persist for longer than the lifecycle
+ of an individual pod.
+
+
+ Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+ be used that way - see the documentation of the driver for
+ more information.
+
+
+ A pod can use both types of ephemeral volumes and
+ persistent volumes at the same time.
+ properties:
+ volumeClaimTemplate:
+ description: |-
+ Will be used to create a stand-alone PVC to provision the volume.
+ The pod in which this EphemeralVolumeSource is embedded will be the
+ owner of the PVC, i.e. the PVC will be deleted together with the
+ pod. The name of the PVC will be `<pod name>-<volume name>` where
+ `<volume name>` is the name from the `PodSpec.Volumes` array
+ entry. Pod validation will reject the pod if the concatenated name
+ is not valid for a PVC (for example, too long).
+
+
+ An existing PVC with that name that is not owned by the pod
+ will *not* be used for the pod to avoid using an unrelated
+ volume by mistake. Starting the pod is then blocked until
+ the unrelated PVC is removed. If such a pre-created PVC is
+ meant to be used by the pod, the PVC has to updated with an
+ owner reference to the pod once the pod exists. Normally
+ this should not be necessary, but it may be useful when
+ manually reconstructing a broken cluster.
+
+
+ This field is read-only and no changes will be made by Kubernetes
+ to the PVC after it has been created.
+
+
+ Required, must not be nil.
+ properties:
+ metadata:
+ description: |-
+ May contain labels and annotations that will be copied into the PVC
+ when creating it. No other fields are allowed and will be rejected during
+ validation.
+ type: object
+ spec:
+ description: |-
+ The specification for the PersistentVolumeClaim. The entire content is
+ copied unchanged into the PVC that gets created from this
+ template. The same fields as in a PersistentVolumeClaim
+ are also valid here.
+ properties:
+ accessModes:
+ description: |-
+ accessModes contains the desired access modes the volume should have.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ items:
+ type: string
+ type: array
+ dataSource:
+ description: |-
+ dataSource field can be used to specify either:
+ * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+ * An existing PVC (PersistentVolumeClaim)
+ If the provisioner or an external controller can support the specified data source,
+ it will create a new volume based on the contents of the specified data source.
+ When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+ and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+ If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ x-kubernetes-map-type: atomic
+ dataSourceRef:
+ description: |-
+ dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+ volume is desired. This may be any object from a non-empty API group (non
+ core object) or a PersistentVolumeClaim object.
+ When this field is specified, volume binding will only succeed if the type of
+ the specified object matches some installed volume populator or dynamic
+ provisioner.
+ This field will replace the functionality of the dataSource field and as such
+ if both fields are non-empty, they must have the same value. For backwards
+ compatibility, when namespace isn't specified in dataSourceRef,
+ both fields (dataSource and dataSourceRef) will be set to the same
+ value automatically if one of them is empty and the other is non-empty.
+ When namespace is specified in dataSourceRef,
+ dataSource isn't set to the same value and must be empty.
+ There are three important differences between dataSource and dataSourceRef:
+ * While dataSource only allows two specific types of objects, dataSourceRef
+ allows any non-core object, as well as PersistentVolumeClaim objects.
+ * While dataSource ignores disallowed values (dropping them), dataSourceRef
+ preserves all values, and generates an error if a disallowed value is
+ specified.
+ * While dataSource only allows local objects, dataSourceRef allows objects
+ in any namespaces.
+ (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ properties:
+ apiGroup:
+ description: |-
+ APIGroup is the group for the resource being referenced.
+ If APIGroup is not specified, the specified Kind must be in the core API group.
+ For any other third-party types, APIGroup is required.
+ type: string
+ kind:
+ description: Kind is the type of resource
+ being referenced
+ type: string
+ name:
+ description: Name is the name of resource
+ being referenced
+ type: string
+ namespace:
+ description: |-
+ Namespace is the namespace of resource being referenced
+ Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+ (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+ type: string
+ required:
+ - kind
+ - name
+ type: object
+ resources:
+ description: |-
+ resources represents the minimum resources the volume should have.
+ If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+ that are lower than previous value but must still be higher than capacity recorded in the
+ status field of the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+ properties:
+ limits:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Limits describes the maximum amount of compute resources allowed.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ requests:
+ additionalProperties:
+ anyOf:
+ - type: integer
+ - type: string
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ description: |-
+ Requests describes the minimum amount of compute resources required.
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+ otherwise to an implementation-defined value. Requests cannot exceed Limits.
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+ type: object
+ type: object
+ selector:
+ description: selector is a label query over
+ volumes to consider for binding.
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list
+ of label selector requirements. The
+ requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ storageClassName:
+ description: |-
+ storageClassName is the name of the StorageClass required by the claim.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ type: string
+ volumeAttributesClassName:
+ description: |-
+ volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+ If specified, the CSI driver will create or update the volume with the attributes defined
+ in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+ it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+ will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+ If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+ will be set by the persistentvolume controller if it exists.
+ If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+ set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+ exists.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass
+ (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+ type: string
+ volumeMode:
+ description: |-
+ volumeMode defines what type of volume is required by the claim.
+ Value of Filesystem is implied when not included in claim spec.
+ type: string
+ volumeName:
+ description: volumeName is the binding reference
+ to the PersistentVolume backing this claim.
+ type: string
+ type: object
+ required:
+ - spec
+ type: object
+ type: object
+ fc:
+ description: fc represents a Fibre Channel resource
+ that is attached to a kubelet's host machine and then
+ exposed to the pod.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ lun:
+ description: 'lun is Optional: FC target lun number'
+ format: int32
+ type: integer
+ readOnly:
+ description: |-
+ readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ targetWWNs:
+ description: 'targetWWNs is Optional: FC target
+ worldwide names (WWNs)'
+ items:
+ type: string
+ type: array
+ wwids:
+ description: |-
+ wwids Optional: FC volume world wide identifiers (wwids)
+ Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ items:
+ type: string
+ type: array
+ type: object
+ flexVolume:
+ description: |-
+ flexVolume represents a generic volume resource that is
+ provisioned/attached using an exec based plugin.
+ properties:
+ driver:
+ description: driver is the name of the driver to
+ use for this volume.
+ type: string
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ type: string
+ options:
+ additionalProperties:
+ type: string
+ description: 'options is Optional: this field holds
+ extra command options if any.'
+ type: object
+ readOnly:
+ description: |-
+ readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is Optional: secretRef is reference to the secret object containing
+ sensitive information to pass to the plugin scripts. This may be
+ empty if no secret object is specified. If the secret object
+ contains more than one secret, all secrets are passed to the plugin
+ scripts.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - driver
+ type: object
+ flocker:
+ description: flocker represents a Flocker volume attached
+ to a kubelet's host machine. This depends on the Flocker
+ control service being running
+ properties:
+ datasetName:
+ description: |-
+ datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+ should be considered as deprecated
+ type: string
+ datasetUUID:
+ description: datasetUUID is the UUID of the dataset.
+ This is unique identifier of a Flocker dataset
+ type: string
+ type: object
+ gcePersistentDisk:
+ description: |-
+ gcePersistentDisk represents a GCE Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ partition:
+ description: |-
+ partition is the partition in the volume that you want to mount.
+ If omitted, the default is to mount by volume name.
+ Examples: For volume /dev/sda1, you specify the partition as "1".
+ Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ format: int32
+ type: integer
+ pdName:
+ description: |-
+ pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ type: boolean
+ required:
+ - pdName
+ type: object
+ gitRepo:
+ description: |-
+ gitRepo represents a git repository at a particular revision.
+ DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+ EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+ into the Pod's container.
+ properties:
+ directory:
+ description: |-
+ directory is the target directory name.
+ Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+ git repository. Otherwise, if specified, the volume will contain the git repository in
+ the subdirectory with the given name.
+ type: string
+ repository:
+ description: repository is the URL
+ type: string
+ revision:
+ description: revision is the commit hash for the
+ specified revision.
+ type: string
+ required:
+ - repository
+ type: object
+ glusterfs:
+ description: |-
+ glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md
+ properties:
+ endpoints:
+ description: |-
+ endpoints is the endpoint name that details Glusterfs topology.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ path:
+ description: |-
+ path is the Glusterfs volume path.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+ type: boolean
+ required:
+ - endpoints
+ - path
+ type: object
+ hostPath:
+ description: |-
+ hostPath represents a pre-existing file or directory on the host
+ machine that is directly exposed to the container. This is generally
+ used for system agents or other privileged things that are allowed
+ to see the host machine. Most containers will NOT need this.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ ---
+ TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
+ mount host directories as read/write.
+ properties:
+ path:
+ description: |-
+ path of the directory on the host.
+ If the path is a symlink, it will follow the link to the real path.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ type:
+ description: |-
+ type for HostPath Volume
+ Defaults to ""
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ type: string
+ required:
+ - path
+ type: object
+ iscsi:
+ description: |-
+ iscsi represents an ISCSI Disk resource that is attached to a
+ kubelet's host machine and then exposed to the pod.
+ More info: https://examples.k8s.io/volumes/iscsi/README.md
+ properties:
+ chapAuthDiscovery:
+ description: chapAuthDiscovery defines whether support
+ iSCSI Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: chapAuthSession defines whether support
+ iSCSI Session CHAP authentication
+ type: boolean
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ initiatorName:
+ description: |-
+ initiatorName is the custom iSCSI Initiator Name.
+ If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ : will be created for the connection.
+ type: string
+ iqn:
+ description: iqn is the target iSCSI Qualified Name.
+ type: string
+ iscsiInterface:
+ description: |-
+ iscsiInterface is the interface Name that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: lun represents iSCSI Target Lun number.
+ format: int32
+ type: integer
+ portals:
+ description: |-
+ portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ items:
+ type: string
+ type: array
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ type: boolean
+ secretRef:
+ description: secretRef is the CHAP Secret for iSCSI
+ target and initiator authentication
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ targetPortal:
+ description: |-
+ targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ is other than default (typically TCP ports 860 and 3260).
+ type: string
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ type: object
+ name:
+ description: |-
+ name of the volume.
+ Must be a DNS_LABEL and unique within the pod.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ nfs:
+ description: |-
+ nfs represents an NFS mount on the host that shares a pod's lifetime
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ properties:
+ path:
+ description: |-
+ path that is exported by the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the NFS export to be mounted with read-only permissions.
+ Defaults to false.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: boolean
+ server:
+ description: |-
+ server is the hostname or IP address of the NFS server.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+ type: string
+ required:
+ - path
+ - server
+ type: object
+ persistentVolumeClaim:
+ description: |-
+ persistentVolumeClaimVolumeSource represents a reference to a
+ PersistentVolumeClaim in the same namespace.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ properties:
+ claimName:
+ description: |-
+ claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ type: string
+ readOnly:
+ description: |-
+ readOnly Will force the ReadOnly setting in VolumeMounts.
+ Default false.
+ type: boolean
+ required:
+ - claimName
+ type: object
+ photonPersistentDisk:
+ description: photonPersistentDisk represents a PhotonController
+ persistent disk attached and mounted on kubelets host
+ machine
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ pdID:
+ description: pdID is the ID that identifies Photon
+ Controller persistent disk
+ type: string
+ required:
+ - pdID
+ type: object
+ portworxVolume:
+ description: portworxVolume represents a portworx volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fSType represents the filesystem type to mount
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ volumeID:
+ description: volumeID uniquely identifies a Portworx
+ volume
+ type: string
+ required:
+ - volumeID
+ type: object
+ projected:
+ description: projected items for all in one resources
+ secrets, configmaps, and downward API
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode are the mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ sources:
+ description: sources is the list of volume projections
+ items:
+ description: Projection that may be projected
+ along with other supported volume types
+ properties:
+ clusterTrustBundle:
+ description: |-
+ ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+ of ClusterTrustBundle objects in an auto-updating file.
+
+
+ Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+
+ ClusterTrustBundle objects can either be selected by name, or by the
+ combination of signer name and a label selector.
+
+
+ Kubelet performs aggressive normalization of the PEM contents written
+ into the pod filesystem. Esoteric PEM features such as inter-block
+ comments and block headers are stripped. Certificates are deduplicated.
+ The ordering of certificates within the file is arbitrary, and Kubelet
+ may change the order over time.
+ properties:
+ labelSelector:
+ description: |-
+ Select all ClusterTrustBundles that match this label selector. Only has
+ effect if signerName is set. Mutually-exclusive with name. If unset,
+ interpreted as "match nothing". If set but empty, interpreted as "match
+ everything".
+ properties:
+ matchExpressions:
+ description: matchExpressions is a
+ list of label selector requirements.
+ The requirements are ANDed.
+ items:
+ description: |-
+ A label selector requirement is a selector that contains values, a key, and an operator that
+ relates the key and values.
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: |-
+ operator represents a key's relationship to a set of values.
+ Valid operators are In, NotIn, Exists and DoesNotExist.
+ type: string
+ values:
+ description: |-
+ values is an array of string values. If the operator is In or NotIn,
+ the values array must be non-empty. If the operator is Exists or DoesNotExist,
+ the values array must be empty. This array is replaced during a strategic
+ merge patch.
+ items:
+ type: string
+ type: array
+ required:
+ - key
+ - operator
+ type: object
+ type: array
+ matchLabels:
+ additionalProperties:
+ type: string
+ description: |-
+ matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+ map is equivalent to an element of matchExpressions, whose key field is "key", the
+ operator is "In", and the values array contains only "value". The requirements are ANDed.
+ type: object
+ type: object
+ x-kubernetes-map-type: atomic
+ name:
+ description: |-
+ Select a single ClusterTrustBundle by object name. Mutually-exclusive
+ with signerName and labelSelector.
+ type: string
+ optional:
+ description: |-
+ If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+ aren't available. If using name, then the named ClusterTrustBundle is
+ allowed not to exist. If using signerName, then the combination of
+ signerName and labelSelector is allowed to match zero
+ ClusterTrustBundles.
+ type: boolean
+ path:
+ description: Relative path from the volume
+ root to write the bundle.
+ type: string
+ signerName:
+ description: |-
+ Select all ClusterTrustBundles that match this signer name.
+ Mutually-exclusive with name. The contents of all selected
+ ClusterTrustBundles will be unified and deduplicated.
+ type: string
+ required:
+ - path
+ type: object
+ configMap:
+ description: configMap information about the
+ configMap data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ ConfigMap will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the ConfigMap,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional specify whether
+ the ConfigMap or its keys must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ downwardAPI:
+ description: downwardAPI information about
+ the downwardAPI data to project
+ properties:
+ items:
+ description: Items is a list of DownwardAPIVolume
+ file
+ items:
+ description: DownwardAPIVolumeFile represents
+ information to create the file containing
+ the pod field
+ properties:
+ fieldRef:
+ description: 'Required: Selects
+ a field of the pod: only annotations,
+ labels, name and namespace are
+ supported.'
+ properties:
+ apiVersion:
+ description: Version of the
+ schema the FieldPath is written
+ in terms of, defaults to "v1".
+ type: string
+ fieldPath:
+ description: Path of the field
+ to select in the specified
+ API version.
+ type: string
+ required:
+ - fieldPath
+ type: object
+ x-kubernetes-map-type: atomic
+ mode:
+ description: |-
+ Optional: mode bits used to set permissions on this file, must be an octal value
+ between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: 'Required: Path is the
+ relative path name of the file
+ to be created. Must not be absolute
+ or contain the ''..'' path. Must
+ be utf-8 encoded. The first item
+ of the relative path must not
+ start with ''..'''
+ type: string
+ resourceFieldRef:
+ description: |-
+ Selects a resource of the container: only resources limits and requests
+ (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+ properties:
+ containerName:
+ description: 'Container name:
+ required for volumes, optional
+ for env vars'
+ type: string
+ divisor:
+ anyOf:
+ - type: integer
+ - type: string
+ description: Specifies the output
+ format of the exposed resources,
+ defaults to "1"
+ pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+ x-kubernetes-int-or-string: true
+ resource:
+ description: 'Required: resource
+ to select'
+ type: string
+ required:
+ - resource
+ type: object
+ x-kubernetes-map-type: atomic
+ required:
+ - path
+ type: object
+ type: array
+ type: object
+ secret:
+ description: secret information about the
+ secret data to project
+ properties:
+ items:
+ description: |-
+ items if unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a
+ path within a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ optional:
+ description: optional field specify whether
+ the Secret or its key must be defined
+ type: boolean
+ type: object
+ x-kubernetes-map-type: atomic
+ serviceAccountToken:
+ description: serviceAccountToken is information
+ about the serviceAccountToken data to project
+ properties:
+ audience:
+ description: |-
+ audience is the intended audience of the token. A recipient of a token
+ must identify itself with an identifier specified in the audience of the
+ token, and otherwise should reject the token. The audience defaults to the
+ identifier of the apiserver.
+ type: string
+ expirationSeconds:
+ description: |-
+ expirationSeconds is the requested duration of validity of the service
+ account token. As the token approaches expiration, the kubelet volume
+ plugin will proactively rotate the service account token. The kubelet will
+ start trying to rotate the token if the token is older than 80 percent of
+ its time to live or if the token is older than 24 hours. Defaults to 1 hour
+ and must be at least 10 minutes.
+ format: int64
+ type: integer
+ path:
+ description: |-
+ path is the path relative to the mount point of the file to project the
+ token into.
+ type: string
+ required:
+ - path
+ type: object
+ type: object
+ type: array
+ type: object
+ quobyte:
+ description: quobyte represents a Quobyte mount on the
+ host that shares a pod's lifetime
+ properties:
+ group:
+ description: |-
+ group to map volume access to
+ Default is no group
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: |-
+ registry represents a single or multiple Quobyte Registry services
+ specified as a string as host:port pair (multiple entries are separated with commas)
+ which acts as the central registry for volumes
+ type: string
+ tenant:
+ description: |-
+ tenant owning the given Quobyte volume in the Backend
+ Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ type: string
+ user:
+ description: |-
+ user to map volume access to
+ Defaults to serviceaccount user
+ type: string
+ volume:
+ description: volume is a string that references
+ an already created Quobyte volume by name.
+ type: string
+ required:
+ - registry
+ - volume
+ type: object
+ rbd:
+ description: |-
+ rbd represents a Rados Block Device mount on the host that shares a pod's lifetime.
+ More info: https://examples.k8s.io/volumes/rbd/README.md
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type of the volume that you want to mount.
+ Tip: Ensure that the filesystem type is supported by the host operating system.
+ Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors in the filesystem from compromising the machine
+ type: string
+ image:
+ description: |-
+ image is the rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ keyring:
+ description: |-
+ keyring is the path to key ring for RBDUser.
+ Default is /etc/ceph/keyring.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ monitors:
+ description: |-
+ monitors is a collection of Ceph monitors.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ items:
+ type: string
+ type: array
+ pool:
+ description: |-
+ pool is the rados pool name.
+ Default is rbd.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ readOnly:
+ description: |-
+ readOnly here will force the ReadOnly setting in VolumeMounts.
+ Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef is name of the authentication secret for RBDUser. If provided
+ overrides keyring.
+ Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ user:
+ description: |-
+ user is the rados user name.
+ Default is admin.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it
+ type: string
+ required:
+ - image
+ - monitors
+ type: object
+ scaleIO:
+ description: scaleIO represents a ScaleIO persistent
+ volume attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs".
+ Default is "xfs".
+ type: string
+ gateway:
+ description: gateway is the host address of the
+ ScaleIO API Gateway.
+ type: string
+ protectionDomain:
+ description: protectionDomain is the name of the
+ ScaleIO Protection Domain for the configured storage.
+ type: string
+ readOnly:
+ description: |-
+ readOnly Defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef references to the secret for ScaleIO user and other
+ sensitive information. If this is not provided, Login operation will fail.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ sslEnabled:
+ description: sslEnabled Flag enable/disable SSL
+ communication with Gateway, default false
+ type: boolean
+ storageMode:
+ description: |-
+ storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned.
+ Default is ThinProvisioned.
+ type: string
+ storagePool:
+ description: storagePool is the ScaleIO Storage
+ Pool associated with the protection domain.
+ type: string
+ system:
+ description: system is the name of the storage system
+ as configured in ScaleIO.
+ type: string
+ volumeName:
+ description: |-
+ volumeName is the name of a volume already created in the ScaleIO system
+ that is associated with this volume source.
+ type: string
+ required:
+ - gateway
+ - secretRef
+ - system
+ type: object
+ secret:
+ description: |-
+ secret represents a secret that should populate this volume.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ properties:
+ defaultMode:
+ description: |-
+ defaultMode is Optional: mode bits used to set permissions on created files by default.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values
+ for mode bits. Defaults to 0644.
+ Directories within the path are not affected by this setting.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ items:
+ description: |-
+ items If unspecified, each key-value pair in the Data field of the referenced
+ Secret will be projected into the volume as a file whose name is the
+ key and content is the value. If specified, the listed keys will be
+ projected into the specified paths, and unlisted keys will not be
+ present. If a key is specified which is not present in the Secret,
+ the volume setup will error unless it is marked optional. Paths must be
+ relative and may not contain the '..' path or start with '..'.
+ items:
+ description: Maps a string key to a path within
+ a volume.
+ properties:
+ key:
+ description: key is the key to project.
+ type: string
+ mode:
+ description: |-
+ mode is Optional: mode bits used to set permissions on this file.
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+ YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+ If not specified, the volume defaultMode will be used.
+ This might be in conflict with other options that affect the file
+ mode, like fsGroup, and the result can be other mode bits set.
+ format: int32
+ type: integer
+ path:
+ description: |-
+ path is the relative path of the file to map the key to.
+ May not be an absolute path.
+ May not contain the path element '..'.
+ May not start with the string '..'.
+ type: string
+ required:
+ - key
+ - path
+ type: object
+ type: array
+ optional:
+ description: optional field specify whether the
+ Secret or its keys must be defined
+ type: boolean
+ secretName:
+ description: |-
+ secretName is the name of the secret in the pod's namespace to use.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+ type: string
+ type: object
+ storageos:
+ description: storageOS represents a StorageOS volume
+ attached and mounted on Kubernetes nodes.
+ properties:
+ fsType:
+ description: |-
+ fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ readOnly:
+ description: |-
+ readOnly defaults to false (read/write). ReadOnly here will force
+ the ReadOnly setting in VolumeMounts.
+ type: boolean
+ secretRef:
+ description: |-
+ secretRef specifies the secret to use for obtaining the StorageOS API
+ credentials. If not specified, default values will be attempted.
+ properties:
+ name:
+ description: |-
+ Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields. apiVersion, kind, uid?
+ type: string
+ type: object
+ x-kubernetes-map-type: atomic
+ volumeName:
+ description: |-
+ volumeName is the human-readable name of the StorageOS volume. Volume
+ names are only unique within a namespace.
+ type: string
+ volumeNamespace:
+ description: |-
+ volumeNamespace specifies the scope of the volume within StorageOS. If no
+ namespace is specified then the Pod's namespace will be used. This allows the
+ Kubernetes name scoping to be mirrored within StorageOS for tighter integration.
+ Set VolumeName to any name to override the default behaviour.
+ Set to "default" if you are not using namespaces within StorageOS.
+ Namespaces that do not pre-exist within StorageOS will be created.
+ type: string
+ type: object
+ vsphereVolume:
+ description: vsphereVolume represents a vSphere volume
+ attached and mounted on kubelets host machine
+ properties:
+ fsType:
+ description: |-
+ fsType is filesystem type to mount.
+ Must be a filesystem type supported by the host operating system.
+ Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ type: string
+ storagePolicyID:
+ description: storagePolicyID is the storage Policy
+ Based Management (SPBM) profile ID associated
+ with the StoragePolicyName.
+ type: string
+ storagePolicyName:
+ description: storagePolicyName is the storage Policy
+ Based Management (SPBM) profile name.
+ type: string
+ volumePath:
+ description: volumePath is the path that identifies
+ vSphere volume vmdk
+ type: string
+ required:
+ - volumePath
+ type: object
+ required:
+ - name
+ type: object
+ type: array
+ required:
+ - containers
+ type: object
+ type: object
+ type:
+ default: rw
+ description: 'Type of service to forward traffic to. Default: `rw`.'
+ enum:
+ - rw
+ - ro
+ type: string
+ required:
+ - cluster
+ - pgbouncer
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the Pooler. This data may not be up to
+ date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ instances:
+ description: The number of pods trying to be scheduled
+ format: int32
+ type: integer
+ secrets:
+ description: The resource version of the config object
+ properties:
+ clientCA:
+ description: The client CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ pgBouncerSecrets:
+ description: The version of the secrets used by PgBouncer
+ properties:
+ authQuery:
+ description: The auth query secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ serverCA:
+ description: The server CA secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ serverTLS:
+ description: The server TLS secret version
+ properties:
+ name:
+ description: The name of the secret
+ type: string
+ version:
+ description: The ResourceVersion of the secret
+ type: string
+ type: object
+ type: object
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ scale:
+ specReplicasPath: .spec.instances
+ statusReplicasPath: .status.instances
+ status: {}
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.14.0
+ name: scheduledbackups.postgresql.cnpg.io
+spec:
+ group: postgresql.cnpg.io
+ names:
+ kind: ScheduledBackup
+ listKind: ScheduledBackupList
+ plural: scheduledbackups
+ singular: scheduledbackup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .metadata.creationTimestamp
+ name: Age
+ type: date
+ - jsonPath: .spec.cluster.name
+ name: Cluster
+ type: string
+ - jsonPath: .status.lastScheduleTime
+ name: Last Backup
+ type: date
+ name: v1
+ schema:
+ openAPIV3Schema:
+ description: ScheduledBackup is the Schema for the scheduledbackups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: |-
+ Specification of the desired behavior of the ScheduledBackup.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ backupOwnerReference:
+ default: none
+ description: |-
+ Indicates which ownerReference should be put inside the created backup resources.
+ - none: no owner reference for created backup objects (same behavior as before the field was introduced)
+ - self: sets the Scheduled backup object as owner of the backup
+ - cluster: set the cluster as owner of the backup
+ enum:
+ - none
+ - self
+ - cluster
+ type: string
+ cluster:
+ description: The cluster to backup
+ properties:
+ name:
+ description: Name of the referent.
+ type: string
+ required:
+ - name
+ type: object
+ immediate:
+ description: If the first backup has to be immediately start after
+ creation or not
+ type: boolean
+ method:
+ default: barmanObjectStore
+ description: |-
+ The backup method to be used, possible options are `barmanObjectStore`
+ and `volumeSnapshot`. Defaults to: `barmanObjectStore`.
+ enum:
+ - barmanObjectStore
+ - volumeSnapshot
+ type: string
+ online:
+ description: |-
+ Whether the default type of backup with volume snapshots is
+ online/hot (`true`, default) or offline/cold (`false`)
+ Overrides the default setting specified in the cluster field '.spec.backup.volumeSnapshot.online'
+ type: boolean
+ onlineConfiguration:
+ description: |-
+ Configuration parameters to control the online/hot backup with volume snapshots
+ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.onlineConfiguration' stanza
+ properties:
+ immediateCheckpoint:
+ description: |-
+ Control whether the I/O workload for the backup initial checkpoint will
+ be limited, according to the `checkpoint_completion_target` setting on
+ the PostgreSQL server. If set to true, an immediate checkpoint will be
+ used, meaning PostgreSQL will complete the checkpoint as soon as
+ possible. `false` by default.
+ type: boolean
+ waitForArchive:
+ default: true
+ description: |-
+ If false, the function will return immediately after the backup is completed,
+ without waiting for WAL to be archived.
+ This behavior is only useful with backup software that independently monitors WAL archiving.
+ Otherwise, WAL required to make the backup consistent might be missing and make the backup useless.
+ By default, or when this parameter is true, pg_backup_stop will wait for WAL to be archived when archiving is
+ enabled.
+ On a standby, this means that it will wait only when archive_mode = always.
+ If write activity on the primary is low, it may be useful to run pg_switch_wal on the primary in order to trigger
+ an immediate segment switch.
+ type: boolean
+ type: object
+ schedule:
+ description: |-
+ The schedule does not follow the same format used in Kubernetes CronJobs
+ as it includes an additional seconds specifier,
+ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
+ type: string
+ suspend:
+ description: If this backup is suspended or not
+ type: boolean
+ target:
+ description: |-
+ The policy to decide which instance should perform this backup. If empty,
+ it defaults to `cluster.spec.backup.target`.
+ Available options are empty string, `primary` and `prefer-standby`.
+ `primary` to have backups run always on primary instances,
+ `prefer-standby` to have backups run preferably on the most updated
+ standby, if available.
+ enum:
+ - primary
+ - prefer-standby
+ type: string
+ required:
+ - cluster
+ - schedule
+ type: object
+ status:
+ description: |-
+ Most recently observed status of the ScheduledBackup. This data may not be up
+ to date. Populated by the system. Read-only.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ properties:
+ lastCheckTime:
+ description: The latest time the schedule
+ format: date-time
+ type: string
+ lastScheduleTime:
+ description: Information when was the last time that backup was successfully
+ scheduled.
+ format: date-time
+ type: string
+ nextScheduleTime:
+ description: Next time we will run a backup
+ format: date-time
+ type: string
+ type: object
+ required:
+ - metadata
+ - spec
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: cnpg-manager
+rules:
+- apiGroups:
+ - ""
+ resources:
+ - configmaps
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - configmaps/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - patch
+- apiGroups:
+ - ""
+ resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - persistentvolumeclaims
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/exec
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - pods/status
+ verbs:
+ - get
+- apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - secrets/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - ""
+ resources:
+ - serviceaccounts
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - ""
+ resources:
+ - services
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - mutatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+- apiGroups:
+ - admissionregistration.k8s.io
+ resources:
+ - validatingwebhookconfigurations
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - update
+- apiGroups:
+ - apps
+ resources:
+ - deployments
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - batch
+ resources:
+ - jobs
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - coordination.k8s.io
+ resources:
+ - leases
+ verbs:
+ - create
+ - get
+ - update
+- apiGroups:
+ - monitoring.coreos.com
+ resources:
+ - podmonitors
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - watch
+- apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - backups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - clusters/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers/finalizers
+ verbs:
+ - update
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - poolers/status
+ verbs:
+ - get
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - scheduledbackups
+ verbs:
+ - create
+ - delete
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - postgresql.cnpg.io
+ resources:
+ - scheduledbackups/status
+ verbs:
+ - get
+ - patch
+ - update
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - rolebindings
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - rbac.authorization.k8s.io
+ resources:
+ - roles
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - update
+ - watch
+- apiGroups:
+ - snapshot.storage.k8s.io
+ resources:
+ - volumesnapshots
+ verbs:
+ - create
+ - get
+ - list
+ - patch
+ - watch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: cnpg-manager-rolebinding
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: cnpg-manager
+subjects:
+- kind: ServiceAccount
+ name: cnpg-manager
+ namespace: cnpg-system
+---
+apiVersion: v1
+data:
+ queries: |
+ backends:
+ query: |
+ SELECT sa.datname
+ , sa.usename
+ , sa.application_name
+ , states.state
+ , COALESCE(sa.count, 0) AS total
+ , COALESCE(sa.max_tx_secs, 0) AS max_tx_duration_seconds
+ FROM ( VALUES ('active')
+ , ('idle')
+ , ('idle in transaction')
+ , ('idle in transaction (aborted)')
+ , ('fastpath function call')
+ , ('disabled')
+ ) AS states(state)
+ LEFT JOIN (
+ SELECT datname
+ , state
+ , usename
+ , COALESCE(application_name, '') AS application_name
+ , COUNT(*)
+ , COALESCE(EXTRACT (EPOCH FROM (max(now() - xact_start))), 0) AS max_tx_secs
+ FROM pg_catalog.pg_stat_activity
+ GROUP BY datname, state, usename, application_name
+ ) sa ON states.state = sa.state
+ WHERE sa.usename IS NOT NULL
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - usename:
+ usage: "LABEL"
+ description: "Name of the user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - state:
+ usage: "LABEL"
+ description: "State of the backend"
+ - total:
+ usage: "GAUGE"
+ description: "Number of backends"
+ - max_tx_duration_seconds:
+ usage: "GAUGE"
+ description: "Maximum duration of a transaction in seconds"
+
+ backends_waiting:
+ query: |
+ SELECT count(*) AS total
+ FROM pg_catalog.pg_locks blocked_locks
+ JOIN pg_catalog.pg_locks blocking_locks
+ ON blocking_locks.locktype = blocked_locks.locktype
+ AND blocking_locks.database IS NOT DISTINCT FROM blocked_locks.database
+ AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
+ AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
+ AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
+ AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
+ AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
+ AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
+ AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
+ AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
+ AND blocking_locks.pid != blocked_locks.pid
+ JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
+ WHERE NOT blocked_locks.granted
+ metrics:
+ - total:
+ usage: "GAUGE"
+ description: "Total number of backends that are currently waiting on other queries"
+
+ pg_database:
+ query: |
+ SELECT datname
+ , pg_catalog.pg_database_size(datname) AS size_bytes
+ , pg_catalog.age(datfrozenxid) AS xid_age
+ , pg_catalog.mxid_age(datminmxid) AS mxid_age
+ FROM pg_catalog.pg_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of the database"
+ - size_bytes:
+ usage: "GAUGE"
+ description: "Disk space used by the database"
+ - xid_age:
+ usage: "GAUGE"
+ description: "Number of transactions from the frozen XID to the current one"
+ - mxid_age:
+ usage: "GAUGE"
+ description: "Number of multiple transactions (Multixact) from the frozen XID to the current one"
+
+ pg_postmaster:
+ query: |
+ SELECT EXTRACT(EPOCH FROM pg_postmaster_start_time) AS start_time
+ FROM pg_catalog.pg_postmaster_start_time()
+ metrics:
+ - start_time:
+ usage: "GAUGE"
+ description: "Time at which postgres started (based on epoch)"
+
+ pg_replication:
+ query: "SELECT CASE WHEN (
+ NOT pg_catalog.pg_is_in_recovery()
+ OR pg_catalog.pg_last_wal_receive_lsn() = pg_catalog.pg_last_wal_replay_lsn())
+ THEN 0
+ ELSE GREATEST (0,
+ EXTRACT(EPOCH FROM (now() - pg_catalog.pg_last_xact_replay_timestamp())))
+ END AS lag,
+ pg_catalog.pg_is_in_recovery() AS in_recovery,
+ EXISTS (TABLE pg_stat_wal_receiver) AS is_wal_receiver_up,
+ (SELECT count(*) FROM pg_catalog.pg_stat_replication) AS streaming_replicas"
+ metrics:
+ - lag:
+ usage: "GAUGE"
+ description: "Replication lag behind primary in seconds"
+ - in_recovery:
+ usage: "GAUGE"
+ description: "Whether the instance is in recovery"
+ - is_wal_receiver_up:
+ usage: "GAUGE"
+ description: "Whether the instance wal_receiver is up"
+ - streaming_replicas:
+ usage: "GAUGE"
+ description: "Number of streaming replicas connected to the instance"
+
+ pg_replication_slots:
+ query: |
+ SELECT slot_name,
+ slot_type,
+ database,
+ active,
+ (CASE pg_catalog.pg_is_in_recovery()
+ WHEN TRUE THEN pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_last_wal_receive_lsn(), restart_lsn)
+ ELSE pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), restart_lsn)
+ END) as pg_wal_lsn_diff
+ FROM pg_catalog.pg_replication_slots
+ WHERE NOT temporary
+ metrics:
+ - slot_name:
+ usage: "LABEL"
+ description: "Name of the replication slot"
+ - slot_type:
+ usage: "LABEL"
+ description: "Type of the replication slot"
+ - database:
+ usage: "LABEL"
+ description: "Name of the database"
+ - active:
+ usage: "GAUGE"
+ description: "Flag indicating whether the slot is active"
+ - pg_wal_lsn_diff:
+ usage: "GAUGE"
+ description: "Replication lag in bytes"
+
+ pg_stat_archiver:
+ query: |
+ SELECT archived_count
+ , failed_count
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_archived_time)), -1) AS seconds_since_last_archival
+ , COALESCE(EXTRACT(EPOCH FROM (now() - last_failed_time)), -1) AS seconds_since_last_failure
+ , COALESCE(EXTRACT(EPOCH FROM last_archived_time), -1) AS last_archived_time
+ , COALESCE(EXTRACT(EPOCH FROM last_failed_time), -1) AS last_failed_time
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_archived_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_archived_wal_start_lsn
+ , COALESCE(CAST(CAST('x'||pg_catalog.right(pg_catalog.split_part(last_failed_wal, '.', 1), 16) AS pg_catalog.bit(64)) AS pg_catalog.int8), -1) AS last_failed_wal_start_lsn
+ , EXTRACT(EPOCH FROM stats_reset) AS stats_reset_time
+ FROM pg_catalog.pg_stat_archiver
+ metrics:
+ - archived_count:
+ usage: "COUNTER"
+ description: "Number of WAL files that have been successfully archived"
+ - failed_count:
+ usage: "COUNTER"
+ description: "Number of failed attempts for archiving WAL files"
+ - seconds_since_last_archival:
+ usage: "GAUGE"
+ description: "Seconds since the last successful archival operation"
+ - seconds_since_last_failure:
+ usage: "GAUGE"
+ description: "Seconds since the last failed archival operation"
+ - last_archived_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving succeeded"
+ - last_failed_time:
+ usage: "GAUGE"
+ description: "Epoch of the last time WAL archiving failed"
+ - last_archived_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Archived WAL start LSN"
+ - last_failed_wal_start_lsn:
+ usage: "GAUGE"
+ description: "Last failed WAL LSN"
+ - stats_reset_time:
+ usage: "GAUGE"
+ description: "Time at which these statistics were last reset"
+
+ pg_stat_bgwriter:
+ query: |
+ SELECT checkpoints_timed
+ , checkpoints_req
+ , checkpoint_write_time
+ , checkpoint_sync_time
+ , buffers_checkpoint
+ , buffers_clean
+ , maxwritten_clean
+ , buffers_backend
+ , buffers_backend_fsync
+ , buffers_alloc
+ FROM pg_catalog.pg_stat_bgwriter
+ metrics:
+ - checkpoints_timed:
+ usage: "COUNTER"
+ description: "Number of scheduled checkpoints that have been performed"
+ - checkpoints_req:
+ usage: "COUNTER"
+ description: "Number of requested checkpoints that have been performed"
+ - checkpoint_write_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are written to disk, in milliseconds"
+ - checkpoint_sync_time:
+ usage: "COUNTER"
+ description: "Total amount of time that has been spent in the portion of checkpoint processing where files are synchronized to disk, in milliseconds"
+ - buffers_checkpoint:
+ usage: "COUNTER"
+ description: "Number of buffers written during checkpoints"
+ - buffers_clean:
+ usage: "COUNTER"
+ description: "Number of buffers written by the background writer"
+ - maxwritten_clean:
+ usage: "COUNTER"
+ description: "Number of times the background writer stopped a cleaning scan because it had written too many buffers"
+ - buffers_backend:
+ usage: "COUNTER"
+ description: "Number of buffers written directly by a backend"
+ - buffers_backend_fsync:
+ usage: "COUNTER"
+ description: "Number of times a backend had to execute its own fsync call (normally the background writer handles those even when the backend does its own write)"
+ - buffers_alloc:
+ usage: "COUNTER"
+ description: "Number of buffers allocated"
+
+ pg_stat_database:
+ query: |
+ SELECT datname
+ , xact_commit
+ , xact_rollback
+ , blks_read
+ , blks_hit
+ , tup_returned
+ , tup_fetched
+ , tup_inserted
+ , tup_updated
+ , tup_deleted
+ , conflicts
+ , temp_files
+ , temp_bytes
+ , deadlocks
+ , blk_read_time
+ , blk_write_time
+ FROM pg_catalog.pg_stat_database
+ metrics:
+ - datname:
+ usage: "LABEL"
+ description: "Name of this database"
+ - xact_commit:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been committed"
+ - xact_rollback:
+ usage: "COUNTER"
+ description: "Number of transactions in this database that have been rolled back"
+ - blks_read:
+ usage: "COUNTER"
+ description: "Number of disk blocks read in this database"
+ - blks_hit:
+ usage: "COUNTER"
+ description: "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary (this only includes hits in the PostgreSQL buffer cache, not the operating system's file system cache)"
+ - tup_returned:
+ usage: "COUNTER"
+ description: "Number of rows returned by queries in this database"
+ - tup_fetched:
+ usage: "COUNTER"
+ description: "Number of rows fetched by queries in this database"
+ - tup_inserted:
+ usage: "COUNTER"
+ description: "Number of rows inserted by queries in this database"
+ - tup_updated:
+ usage: "COUNTER"
+ description: "Number of rows updated by queries in this database"
+ - tup_deleted:
+ usage: "COUNTER"
+ description: "Number of rows deleted by queries in this database"
+ - conflicts:
+ usage: "COUNTER"
+ description: "Number of queries canceled due to conflicts with recovery in this database"
+ - temp_files:
+ usage: "COUNTER"
+ description: "Number of temporary files created by queries in this database"
+ - temp_bytes:
+ usage: "COUNTER"
+ description: "Total amount of data written to temporary files by queries in this database"
+ - deadlocks:
+ usage: "COUNTER"
+ description: "Number of deadlocks detected in this database"
+ - blk_read_time:
+ usage: "COUNTER"
+ description: "Time spent reading data file blocks by backends in this database, in milliseconds"
+ - blk_write_time:
+ usage: "COUNTER"
+ description: "Time spent writing data file blocks by backends in this database, in milliseconds"
+
+ pg_stat_replication:
+ primary: true
+ query: |
+ SELECT usename
+ , COALESCE(application_name, '') AS application_name
+ , COALESCE(client_addr::text, '') AS client_addr
+ , COALESCE(client_port::text, '') AS client_port
+ , EXTRACT(EPOCH FROM backend_start) AS backend_start
+ , COALESCE(pg_catalog.age(backend_xmin), 0) AS backend_xmin_age
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), sent_lsn) AS sent_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), write_lsn) AS write_diff_bytes
+ , pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), flush_lsn) AS flush_diff_bytes
+ , COALESCE(pg_catalog.pg_wal_lsn_diff(pg_catalog.pg_current_wal_lsn(), replay_lsn),0) AS replay_diff_bytes
+ , COALESCE((EXTRACT(EPOCH FROM write_lag)),0)::float AS write_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM flush_lag)),0)::float AS flush_lag_seconds
+ , COALESCE((EXTRACT(EPOCH FROM replay_lag)),0)::float AS replay_lag_seconds
+ FROM pg_catalog.pg_stat_replication
+ metrics:
+ - usename:
+ usage: "LABEL"
+ description: "Name of the replication user"
+ - application_name:
+ usage: "LABEL"
+ description: "Name of the application"
+ - client_addr:
+ usage: "LABEL"
+ description: "Client IP address"
+ - client_port:
+ usage: "LABEL"
+ description: "Client TCP port"
+ - backend_start:
+ usage: "COUNTER"
+ description: "Time when this process was started"
+ - backend_xmin_age:
+ usage: "COUNTER"
+ description: "The age of this standby's xmin horizon"
+ - sent_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location sent on this connection"
+ - write_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location written to disk by this standby server"
+ - flush_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location flushed to disk by this standby server"
+ - replay_diff_bytes:
+ usage: "GAUGE"
+ description: "Difference in bytes from the last write-ahead log location replayed into the database on this standby server"
+ - write_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it"
+ - flush_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it"
+ - replay_lag_seconds:
+ usage: "GAUGE"
+ description: "Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it"
+
+ pg_settings:
+ query: |
+ SELECT name,
+ CASE setting WHEN 'on' THEN '1' WHEN 'off' THEN '0' ELSE setting END AS setting
+ FROM pg_catalog.pg_settings
+ WHERE vartype IN ('integer', 'real', 'bool')
+ ORDER BY 1
+ metrics:
+ - name:
+ usage: "LABEL"
+ description: "Name of the setting"
+ - setting:
+ usage: "GAUGE"
+ description: "Setting value"
+kind: ConfigMap
+metadata:
+ labels:
+ cnpg.io/reload: ""
+ name: cnpg-default-monitoring
+ namespace: cnpg-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+spec:
+ ports:
+ - port: 443
+ targetPort: 9443
+ selector:
+ app.kubernetes.io/name: cloudnative-pg
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ name: cnpg-controller-manager
+ namespace: cnpg-system
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: cloudnative-pg
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: cloudnative-pg
+ spec:
+ containers:
+ - args:
+ - controller
+ - --leader-elect
+ - --config-map-name=cnpg-controller-manager-config
+ - --secret-name=cnpg-controller-manager-config
+ - --webhook-port=9443
+ command:
+ - /manager
+ env:
+ - name: OPERATOR_IMAGE_NAME
+ value: ghcr.io/cloudnative-pg/cloudnative-pg:1.22.2
+ - name: OPERATOR_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MONITORING_QUERIES_CONFIGMAP
+ value: cnpg-default-monitoring
+ image: ghcr.io/cloudnative-pg/cloudnative-pg:1.22.2
+ livenessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ name: manager
+ ports:
+ - containerPort: 8080
+ name: metrics
+ protocol: TCP
+ - containerPort: 9443
+ name: webhook-server
+ protocol: TCP
+ readinessProbe:
+ httpGet:
+ path: /readyz
+ port: 9443
+ scheme: HTTPS
+ resources:
+ limits:
+ cpu: 100m
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 100Mi
+ securityContext:
+ allowPrivilegeEscalation: false
+ capabilities:
+ drop:
+ - ALL
+ readOnlyRootFilesystem: true
+ runAsGroup: 10001
+ runAsUser: 10001
+ seccompProfile:
+ type: RuntimeDefault
+ volumeMounts:
+ - mountPath: /controller
+ name: scratch-data
+ - mountPath: /run/secrets/cnpg.io/webhook
+ name: webhook-certificates
+ securityContext:
+ runAsNonRoot: true
+ seccompProfile:
+ type: RuntimeDefault
+ serviceAccountName: cnpg-manager
+ terminationGracePeriodSeconds: 10
+ volumes:
+ - emptyDir: {}
+ name: scratch-data
+ - name: webhook-certificates
+ secret:
+ defaultMode: 420
+ optional: true
+ secretName: cnpg-webhook-cert
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+ name: cnpg-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: mbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: mcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /mutate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: mscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+ name: cnpg-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-backup
+ failurePolicy: Fail
+ name: vbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - backups
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-cluster
+ failurePolicy: Fail
+ name: vcluster.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - clusters
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-pooler
+ failurePolicy: Fail
+ name: vpooler.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - poolers
+ sideEffects: None
+- admissionReviewVersions:
+ - v1
+ clientConfig:
+ service:
+ name: cnpg-webhook-service
+ namespace: cnpg-system
+ path: /validate-postgresql-cnpg-io-v1-scheduledbackup
+ failurePolicy: Fail
+ name: vscheduledbackup.cnpg.io
+ rules:
+ - apiGroups:
+ - postgresql.cnpg.io
+ apiVersions:
+ - v1
+ operations:
+ - CREATE
+ - UPDATE
+ resources:
+ - scheduledbackups
+ sideEffects: None
diff --git a/tests/e2e/architecture_test.go b/tests/e2e/architecture_test.go
index d1dbd55bd6..ca2aa5131f 100644
--- a/tests/e2e/architecture_test.go
+++ b/tests/e2e/architecture_test.go
@@ -17,11 +17,9 @@ limitations under the License.
package e2e
import (
- "os"
- "strings"
-
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/tests"
+ "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
@@ -34,25 +32,10 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() {
level = tests.Low
)
- // we assume the image to be built for just amd64 as default. We try to calculate other envs inside the beforeEach
- // block
- imageArchitectures := []string{"amd64"}
-
BeforeEach(func() {
if testLevelEnv.Depth < int(level) {
Skip("Test depth is lower than the amount requested for this test")
}
-
- // TODO: instead of fetching the current architectures using the
- // PLATFORMS env variable, we should have a manager command which
- // returns all the architectures available in the current image.
-
- // Fetch the current image architectures via the PLATFORMS env variable.
- if architecturesFromUser, exist := os.LookupEnv("PLATFORMS"); exist {
- s := strings.ReplaceAll(architecturesFromUser, "linux/", "")
- arches := strings.Split(s, ",")
- imageArchitectures = arches
- }
})
// verifyArchitectureStatus checks that a given expectedValue (e.g. amd64)
@@ -101,6 +84,12 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() {
Expect(err).ToNot(HaveOccurred())
AssertCreateCluster(namespace, clusterName, clusterManifest, env)
+ // Fetch the operator's available architectures
+ operatorPod, err := env.GetOperatorPod()
+ Expect(err).ToNot(HaveOccurred())
+ imageArchitectures, err := utils.GetOperatorArchitectures(&operatorPod)
+ Expect(err).ToNot(HaveOccurred())
+
// Fetch the Cluster status
cluster, err := env.GetCluster(namespace, clusterName)
Expect(err).ToNot(HaveOccurred())
diff --git a/tests/e2e/asserts_test.go b/tests/e2e/asserts_test.go
index b54bc8977a..c66b5b4de6 100644
--- a/tests/e2e/asserts_test.go
+++ b/tests/e2e/asserts_test.go
@@ -723,6 +723,18 @@ func minioPath(serverName, fileName string) string {
return filepath.Join("*", serverName, "*", "*", fileName)
}
+// CheckPointAndSwitchWalOnPrimary triggers a checkpoint and a WAL switch on the primary pod, and returns the name of the latest archived WAL file
+func CheckPointAndSwitchWalOnPrimary(namespace, clusterName string) string {
+ var latestWAL string
+ By("trigger checkpoint and switch wal on primary", func() {
+ pod, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ primary := pod.GetName()
+ latestWAL = switchWalAndGetLatestArchive(namespace, primary)
+ })
+ return latestWAL
+}
+
// AssertArchiveWalOnMinio archives WALs and verifies that they are in the storage
func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) {
var latestWALPath string
@@ -738,7 +750,7 @@ func AssertArchiveWalOnMinio(namespace, clusterName string, serverName string) {
By(fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath), func() {
Eventually(func() (int, error) {
// WALs are compressed with gzip in the fixture
- return testsUtils.CountFilesOnMinio(namespace, minioClientName, latestWALPath)
+ return testsUtils.CountFilesOnMinio(minioEnv, latestWALPath)
}, testTimeouts[testsUtils.WalsInMinio]).Should(BeEquivalentTo(1))
})
}
@@ -1812,7 +1824,7 @@ func prepareClusterForPITROnMinio(
testsUtils.ExecuteBackup(namespace, backupSampleFile, false, testTimeouts[testsUtils.BackupIsReady], env)
latestTar := minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testsUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testsUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeNumerically(">=", expectedVal),
fmt.Sprintf("verify the number of backups %v is greater than or equal to %v", latestTar,
expectedVal))
@@ -2552,6 +2564,10 @@ func GetYAMLContent(sampleFilePath string) ([]byte, error) {
"E2E_CSI_STORAGE_CLASS": csiStorageClass,
})
+ if serverName := os.Getenv("SERVER_NAME"); serverName != "" {
+ envVars["SERVER_NAME"] = serverName
+ }
+
yaml, err = testsUtils.Envsubst(envVars, data)
if err != nil {
return nil, wrapErr(err)
diff --git a/tests/e2e/backup_restore_test.go b/tests/e2e/backup_restore_test.go
index 4bc74c1768..59e657056d 100644
--- a/tests/e2e/backup_restore_test.go
+++ b/tests/e2e/backup_restore_test.go
@@ -19,14 +19,10 @@ package e2e
import (
"fmt"
"path/filepath"
- "strings"
- "github.com/thoas/go-funk"
- corev1 "k8s.io/api/core/v1"
ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/tests"
testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
@@ -67,20 +63,11 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
const (
backupFile = fixturesDir + "/backup/minio/backup-minio.yaml"
customQueriesSampleFile = fixturesDir + "/metrics/custom-queries-with-target-databases.yaml"
- minioCaSecName = "minio-server-ca-secret"
- minioTLSSecName = "minio-server-tls-secret"
)
clusterWithMinioSampleFile := fixturesDir + "/backup/minio/cluster-with-backup-minio.yaml.template"
BeforeAll(func() {
- //
- // IMPORTANT: this is to ensure that we test the old backup system too
- //
- if funk.RandomInt(0, 100) < 50 {
- GinkgoWriter.Println("---- Testing barman backups without the name flag ----")
- clusterWithMinioSampleFile = fixturesDir + "/backup/minio/cluster-with-backup-minio-legacy.yaml.template"
- }
if !IsLocal() {
Skip("This test is only run on local cluster")
}
@@ -95,18 +82,8 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
return env.DeleteNamespace(namespace)
})
- By("creating ca and tls certificate secrets", func() {
- // create CA certificates
- _, caPair, err := testUtils.CreateSecretCA(namespace, clusterName, minioCaSecName, true, env)
- Expect(err).ToNot(HaveOccurred())
-
- // sign and create secret using CA certificate and key
- serverPair, err := caPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
- []string{"minio-service.internal.mydomain.net, minio-service.default.svc, minio-service.default,"},
- )
- Expect(err).ToNot(HaveOccurred())
- serverSecret := serverPair.GenerateCertificateSecret(namespace, minioTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
@@ -114,21 +91,6 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
})
- By("setting up minio", func() {
- setup, err := testUtils.MinioSSLSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioSSLClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
- Expect(err).ToNot(HaveOccurred())
- })
-
// Create the curl client pod and wait for it to be ready.
By("setting up curl client pod", func() {
curlClient := testUtils.CurlClient(namespace)
@@ -148,7 +110,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterName, primaryPod.GetName(), "minio", "minio123")
+ namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
if err != nil {
return false, err
}
@@ -188,18 +150,27 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
backup = testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env)
AssertBackupConditionInClusterStatus(namespace, clusterName)
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastSuccessfulBackup, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastFailedBackup, err
}, 30).Should(BeEmpty())
})
@@ -227,7 +198,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
By("executing a second backup and verifying the number of backups on minio", func() {
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
// delete the first backup and create a second backup
@@ -242,7 +213,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
testUtils.ExecuteBackup(namespace, backupFile, false, testTimeouts[testUtils.BackupIsReady], env)
latestTar = minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(2))
})
@@ -273,7 +244,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
latestGZ := filepath.Join("*", clusterName, "*", "*.history.gz")
By(fmt.Sprintf("checking the previous number of .history files in minio, history file name is %v",
latestGZ), func() {
- previous, err = testUtils.CountFilesOnMinio(namespace, minioClientName, latestGZ)
+ previous, err = testUtils.CountFilesOnMinio(minioEnv, latestGZ)
Expect(err).ToNot(HaveOccurred())
})
@@ -281,7 +252,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
By("checking the number of .history after switchover", func() {
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestGZ)
+ return testUtils.CountFilesOnMinio(minioEnv, latestGZ)
}, 60).Should(BeNumerically(">", previous))
})
@@ -326,7 +297,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
testUtils.ExecuteBackup(namespace, backupStandbyFile, true, testTimeouts[testUtils.BackupIsReady], env)
AssertBackupConditionInClusterStatus(namespace, targetClusterName)
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, targetClusterName)
@@ -370,7 +341,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
testUtils.ExecuteBackup(namespace, backupWithTargetFile, true, testTimeouts[testUtils.BackupIsReady], env)
AssertBackupConditionInClusterStatus(namespace, targetClusterName)
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1))
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, targetClusterName)
@@ -429,7 +400,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
AssertBackupConditionInClusterStatus(namespace, customClusterName)
latestBaseTar := minioPath(clusterServerName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestBaseTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar)
}, 60).Should(BeEquivalentTo(1),
fmt.Sprintf("verify the number of backup %v is equals to 1", latestBaseTar))
// this is the second backup we take on the bucket
@@ -465,7 +436,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
// AssertScheduledBackupsImmediate creates at least two backups, we should find
// their base backups
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestBaseTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestBaseTar)
}, 60).Should(BeNumerically(">=", 2),
fmt.Sprintf("verify the number of backup %v is >= 2", latestBaseTar))
})
@@ -516,7 +487,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
AssertScheduledBackupsAreScheduled(namespace, scheduledBackupSampleFile, 300)
latestTar := minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeNumerically(">=", 2),
fmt.Sprintf("verify the number of backup %v is great than 2", latestTar))
})
@@ -526,7 +497,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
It("verify tags in backed files", func() {
AssertArchiveWalOnMinio(namespace, clusterName, clusterName)
- tags, err := testUtils.GetFileTagsOnMinio(namespace, minioClientName, "*[0-9].gz")
+ tags, err := testUtils.GetFileTagsOnMinio(minioEnv, "*[0-9].gz")
Expect(err).ToNot(HaveOccurred())
Expect(tags.Tags).ToNot(BeEmpty())
@@ -542,7 +513,7 @@ var _ = Describe("Backup and restore", Label(tests.LabelBackupRestore), func() {
AssertNewPrimary(namespace, clusterName, oldPrimary)
- tags, err = testUtils.GetFileTagsOnMinio(namespace, minioClientName, "*.history.gz")
+ tags, err = testUtils.GetFileTagsOnMinio(minioEnv, "*.history.gz")
Expect(err).ToNot(HaveOccurred())
Expect(tags.Tags).ToNot(BeEmpty())
})
@@ -849,37 +820,10 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
return env.DeleteNamespace(namespace)
})
- By("creating ca and tls certificate secrets", func() {
- // create CA certificate
- _, caPair, err := testUtils.CreateSecretCA(namespace, clusterName, minioCaSecName, true, env)
- Expect(err).ToNot(HaveOccurred())
+ AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
- // sign and create secret using CA certificate and key
- serverPair, err := caPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
- []string{"minio-service.internal.mydomain.net, minio-service.default.svc, minio-service.default,"},
- )
- Expect(err).ToNot(HaveOccurred())
- serverSecret := serverPair.GenerateCertificateSecret(namespace, minioTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
- Expect(err).ToNot(HaveOccurred())
- })
-
- By("creating the credentials for minio", func() {
- AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
- })
-
- By("setting up minio", func() {
- setup, err := testUtils.MinioSSLSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioSSLClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
@@ -891,7 +835,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterName, primaryPod.GetName(), "minio", "minio123")
+ namespace, clusterName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
if err != nil {
return false, err
}
@@ -921,11 +865,14 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
latestTar := minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(1),
fmt.Sprintf("verify the number of backup %v is equals to 1", latestTar))
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
})
@@ -965,7 +912,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
AssertBackupConditionInClusterStatus(namespace, clusterName)
latestTar := minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(2),
fmt.Sprintf("verify the number of backup %v is equals to 2", latestTar))
})
@@ -999,7 +946,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
AssertBackupConditionInClusterStatus(namespace, clusterName)
latestTar := minioPath(clusterName, "data.tar")
Eventually(func() (int, error) {
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestTar)
+ return testUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 60).Should(BeEquivalentTo(3),
fmt.Sprintf("verify the number of backup %v is great than 3", latestTar))
})
@@ -1256,7 +1203,7 @@ var _ = Describe("Clusters Recovery From Barman Object Store", Label(tests.Label
})
})
-var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), func() {
+/*var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), func() {
const (
level = tests.High
@@ -1267,6 +1214,10 @@ var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), f
var namespace, clusterName, namespace2 string
+ BeforeAll(func() {
+ Skip("Disabled by now - check issue #3967")
+ })
+
BeforeEach(func() {
if testLevelEnv.Depth < int(level) {
Skip("Test depth is lower than the amount requested for this test")
@@ -1279,8 +1230,8 @@ var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), f
env.DumpNamespaceObjects(namespace2, "out/"+namespace2+CurrentSpecReport().LeafNodeText+".log")
}
})
- Context("using minio as object storage", Ordered, func() {
- // This is a set of tests using a minio server to ensure backup and safet
+ Context("using minio as object storage", Ordered, func() {
+ // This is a set of tests using a minio server to ensure backup and safety
// in case user configures the same destination path for more backups
const (
@@ -1313,27 +1264,15 @@ var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), f
return env.DeleteNamespace(namespace2)
})
+ caSecret, err := minioEnv.GetCaSecret(env, namespace)
+ Expect(err).ToNot(HaveOccurred())
+ _, err = testUtils.CreateObject(env, caSecret)
+ Expect(err).ToNot(HaveOccurred())
+
By("creating the credentials for minio", func() {
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
})
- // setting up default minio
- By("setting up minio", func() {
- minio, err := testUtils.MinioDefaultSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
-
- err = testUtils.InstallMinio(env, minio, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioDefaultClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
- Expect(err).ToNot(HaveOccurred())
- })
-
// Creates the cluster
AssertCreateCluster(namespace, clusterName, clusterSampleFile, env)
@@ -1489,3 +1428,4 @@ var _ = Describe("Backup and restore Safety", Label(tests.LabelBackupRestore), f
})
})
})
+*/
diff --git a/tests/e2e/commons_test.go b/tests/e2e/commons_test.go
index 6ccdfd7035..3af40b0965 100644
--- a/tests/e2e/commons_test.go
+++ b/tests/e2e/commons_test.go
@@ -18,12 +18,6 @@ package e2e
import "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
-const (
- minioClientName = "mc"
- checkPointCmd = "psql -U postgres postgres -tAc 'CHECKPOINT;'"
- getLatestWalCmd = "psql -U postgres postgres -tAc 'SELECT pg_walfile_name(pg_switch_wal());'"
-)
-
// IsAKS checks if the running cluster is on AKS
func IsAKS() bool {
return *testCloudVendorEnv == utils.AKS
diff --git a/tests/e2e/drain_node_test.go b/tests/e2e/drain_node_test.go
index 4a20156549..a6ff75655c 100644
--- a/tests/e2e/drain_node_test.go
+++ b/tests/e2e/drain_node_test.go
@@ -21,6 +21,8 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
+ "k8s.io/utils/ptr"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
@@ -49,7 +51,8 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
if (node.Spec.Unschedulable != true) && (len(node.Spec.Taints) == 0) {
nodesWithLabels = append(nodesWithLabels, node.Name)
cmd := fmt.Sprintf("kubectl label node %v drain=drain --overwrite", node.Name)
- _, _, err := testsUtils.Run(cmd)
+ _, stderr, err := testsUtils.Run(cmd)
+ Expect(stderr).To(BeEmpty())
Expect(err).ToNot(HaveOccurred())
}
if len(nodesWithLabels) == 3 {
@@ -420,4 +423,102 @@ var _ = Describe("E2E Drain Node", Serial, Label(tests.LabelDisruptive, tests.La
Expect(err).ToNot(HaveOccurred())
})
})
+
+ Context("with a single instance cluster", Ordered, func() {
+ const namespacePrefix = "drain-node-e2e-single-instance"
+ const sampleFile = fixturesDir + "/drain-node/single-node-pdb-disabled.yaml.template"
+ const clusterName = "cluster-single-instance-pdb"
+ var namespace string
+
+ BeforeAll(func() {
+ var err error
+ // Create a cluster in a namespace we'll delete after the test
+ namespace, err = env.CreateUniqueNamespace(namespacePrefix)
+ Expect(err).ToNot(HaveOccurred())
+ DeferCleanup(func() error {
+ return env.DeleteNamespace(namespace)
+ })
+ })
+
+ When("the PDB is disabled", func() {
+ It("can drain the primary node and recover the cluster when uncordoned", func() {
+ AssertCreateCluster(namespace, clusterName, sampleFile, env)
+
+ By("waiting for the jobs to be removed", func() {
+ // Wait for jobs to be removed
+ timeout := 180
+ Eventually(func() (int, error) {
+ podList, err := env.GetPodList(namespace)
+ return len(podList.Items), err
+ }, timeout).Should(BeEquivalentTo(1))
+ })
+
+ // Load test data
+ primary, err := env.GetClusterPrimary(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+ AssertCreateTestData(namespace, clusterName, "test", primary)
+
+ // Drain the node containing the primary pod and store the list of running pods
+ _ = nodes.DrainPrimaryNode(namespace, clusterName,
+ testTimeouts[testsUtils.DrainNode], env)
+
+ By("verifying the primary is now pending", func() {
+ timeout := 180
+ // No failover can happen in a single-instance cluster: the evicted pod stays Pending until the node is uncordoned
+ Eventually(func() (string, error) {
+ pod, err := env.GetPod(namespace, clusterName+"-1")
+ if err != nil {
+ return "", err
+ }
+ return string(pod.Status.Phase), err
+ }, timeout).Should(BeEquivalentTo("Pending"))
+ })
+
+ By("uncordoning all nodes", func() {
+ err := nodes.UncordonAllNodes(env)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ AssertDataExpectedCountWithDatabaseName(namespace, primary.Name, "app", "test", 2)
+ })
+ })
+
+ When("the PDB is enabled", func() {
+ It("prevents the primary node from being drained", func() {
+ By("enabling PDB", func() {
+ cluster, err := env.GetCluster(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
+
+ updated := cluster.DeepCopy()
+ updated.Spec.EnablePDB = ptr.To(true)
+ err = env.Client.Patch(env.Ctx, updated, client.MergeFrom(cluster))
+ Expect(err).ToNot(HaveOccurred())
+ })
+
+ By("having the draining of the primary node rejected", func() {
+ var primaryNode string
+ Eventually(func(g Gomega) {
+ pod, err := env.GetClusterPrimary(namespace, clusterName)
+ g.Expect(err).ToNot(HaveOccurred())
+ primaryNode = pod.Spec.NodeName
+ }, 60).Should(Succeed())
+
+ // Draining the primary pod's node
+ Eventually(func(g Gomega) {
+ cmd := fmt.Sprintf(
+ "kubectl drain %v --ignore-daemonsets --delete-emptydir-data --force --timeout=%ds",
+ primaryNode, 60)
+ _, stderr, err := testsUtils.RunUnchecked(cmd)
+ g.Expect(err).To(HaveOccurred())
+ g.Expect(stderr).To(ContainSubstring("Cannot evict pod as it would violate the pod's disruption budget"))
+ }, 60).Should(Succeed())
+ })
+
+ By("uncordoning all nodes", func() {
+ err := nodes.UncordonAllNodes(env)
+ Expect(err).ToNot(HaveOccurred())
+ })
+ })
+ })
+ })
})
diff --git a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-2.yaml.template b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-2.yaml.template
index 11df49fea7..831dff071a 100644
--- a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-2.yaml.template
+++ b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-2.yaml.template
@@ -35,7 +35,10 @@ spec:
target: primary
barmanObjectStore:
destinationPath: s3://cluster-backups/test/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
@@ -57,7 +60,10 @@ spec:
- name: pg-backup-minio
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-3.yaml.template b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-3.yaml.template
index b9a90477d6..282366a93a 100644
--- a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-3.yaml.template
+++ b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-3.yaml.template
@@ -35,7 +35,10 @@ spec:
target: primary
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
@@ -57,7 +60,10 @@ spec:
- name: external-cluster-minio-1
barmanObjectStore:
destinationPath: s3://cluster-backups/pg-backup-minio/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-4.yaml.template b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-4.yaml.template
index 39aecceebc..2caed17c25 100644
--- a/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-4.yaml.template
+++ b/tests/e2e/fixtures/backup/backup_restore_safety/external-clusters-minio-4.yaml.template
@@ -57,7 +57,10 @@ spec:
- name: pg-backup-minio
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template
index f6819bb8d7..391a26c17d 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-custom-servername.yaml.template
@@ -46,8 +46,8 @@ spec:
backup:
target: primary
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://pg-backup-minio-custom/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-legacy.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-legacy.yaml.template
index 095d6ea965..f6465d8098 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-legacy.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-legacy.yaml.template
@@ -49,7 +49,7 @@ spec:
target: primary
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-primary.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-primary.yaml.template
index 512ee9ccc8..e8dd1d9ac9 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-primary.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-primary.yaml.template
@@ -47,7 +47,7 @@ spec:
target: primary
barmanObjectStore:
destinationPath: s3://cluster-backups-standby/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-standby.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-standby.yaml.template
index 2b9cfe86b6..ec1a6dad5a 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-standby.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-standby.yaml.template
@@ -47,7 +47,7 @@ spec:
target: prefer-standby
barmanObjectStore:
destinationPath: s3://cluster-backups-standby/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-with-wal-max-parallel.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-with-wal-max-parallel.yaml.template
index 57b44a3b90..2821615375 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-with-wal-max-parallel.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio-with-wal-max-parallel.yaml.template
@@ -1,7 +1,7 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
- name: cluster-backup-minio-wal-max-parallel
+ name: backup-wal-max-parallel
spec:
instances: 2
@@ -38,8 +38,11 @@ spec:
backup:
target: primary
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ destinationPath: s3://backup-wal-max-parallel/
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio.yaml.template b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio.yaml.template
index 96452bfa5d..b305a2173d 100644
--- a/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio.yaml.template
+++ b/tests/e2e/fixtures/backup/minio/cluster-with-backup-minio.yaml.template
@@ -47,8 +47,8 @@ spec:
backup:
target: primary
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://pg-backup-minio/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-03.yaml.template b/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-03.yaml.template
index 3f1775aa9a..f3a977aa35 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-03.yaml.template
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-03.yaml.template
@@ -36,7 +36,7 @@ spec:
- name: source-cluster-minio
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-replica-04.yaml.template b/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-replica-04.yaml.template
index c71d94d909..b7cd14e672 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-replica-04.yaml.template
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/external-clusters-minio-replica-04.yaml.template
@@ -39,7 +39,7 @@ spec:
- name: source-cluster-minio
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/backup/recovery_external_clusters/source-cluster-minio-01.yaml.template b/tests/e2e/fixtures/backup/recovery_external_clusters/source-cluster-minio-01.yaml.template
index 8c6a34f558..b4f2f73ffc 100644
--- a/tests/e2e/fixtures/backup/recovery_external_clusters/source-cluster-minio-01.yaml.template
+++ b/tests/e2e/fixtures/backup/recovery_external_clusters/source-cluster-minio-01.yaml.template
@@ -36,7 +36,7 @@ spec:
target: primary
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/drain-node/single-node-pdb-disabled.yaml.template b/tests/e2e/fixtures/drain-node/single-node-pdb-disabled.yaml.template
new file mode 100644
index 0000000000..707949ba69
--- /dev/null
+++ b/tests/e2e/fixtures/drain-node/single-node-pdb-disabled.yaml.template
@@ -0,0 +1,11 @@
+apiVersion: postgresql.cnpg.io/v1
+kind: Cluster
+metadata:
+ name: cluster-single-instance-pdb
+spec:
+ instances: 1
+ enablePDB: false
+
+ storage:
+ size: 1Gi
+ storageClass: ${E2E_DEFAULT_STORAGE_CLASS}
diff --git a/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-ro.yaml b/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-ro.yaml
index daf759b9b0..7e93f96324 100644
--- a/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-ro.yaml
+++ b/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-ro.yaml
@@ -19,4 +19,4 @@ spec:
poolMode: session
authQuerySecret:
name: cluster-pgbouncer-app
- authQuery: SELECT usename, passwd FROM pg_shadow WHERE usename=$1
+ authQuery: SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1
diff --git a/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-rw.yaml b/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-rw.yaml
index e8aa9c25d8..0ff8d9f889 100644
--- a/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-rw.yaml
+++ b/tests/e2e/fixtures/pgbouncer/pgbouncer-pooler-basic-auth-rw.yaml
@@ -12,4 +12,4 @@ spec:
poolMode: session
authQuerySecret:
name: cluster-pgbouncer-app
- authQuery: SELECT usename, passwd FROM pg_shadow WHERE usename=$1
+ authQuery: SELECT usename, passwd FROM pg_catalog.pg_shadow WHERE usename=$1
diff --git a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-archive-mode-always.yaml.template b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-archive-mode-always.yaml.template
index 654358531c..84b52bf136 100644
--- a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-archive-mode-always.yaml.template
+++ b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-archive-mode-always.yaml.template
@@ -32,7 +32,10 @@ spec:
barmanObjectStore:
destinationPath: s3://replica-cluster/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
@@ -48,7 +51,10 @@ spec:
backup:
barmanObjectStore:
destinationPath: s3://replica-cluster/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-backup.yaml.template b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-backup.yaml.template
index 4702cf554f..61a629502c 100644
--- a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-backup.yaml.template
+++ b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-backup.yaml.template
@@ -35,7 +35,10 @@ spec:
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
@@ -44,4 +47,4 @@ spec:
name: backup-storage-creds
key: KEY
wal:
- compression: gzip
\ No newline at end of file
+ compression: gzip
diff --git a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-snapshot.yaml.template b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-snapshot.yaml.template
index 7244e1e185..39afeabf9e 100644
--- a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-snapshot.yaml.template
+++ b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-from-snapshot.yaml.template
@@ -44,7 +44,10 @@ spec:
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
@@ -53,4 +56,4 @@ spec:
name: backup-storage-creds
key: KEY
wal:
- compression: gzip
\ No newline at end of file
+ compression: gzip
diff --git a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-src-with-backup.yaml.template b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-src-with-backup.yaml.template
index 1f9ec2fc2c..930e7f6e9f 100644
--- a/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-src-with-backup.yaml.template
+++ b/tests/e2e/fixtures/replica_mode_cluster/cluster-replica-src-with-backup.yaml.template
@@ -37,7 +37,10 @@ spec:
className: ${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}
barmanObjectStore:
destinationPath: s3://cluster-backups/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: backup-storage-creds
diff --git a/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces-pitr.yaml.template b/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces-pitr.yaml.template
index 3e01d34c70..43a92e98a7 100644
--- a/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces-pitr.yaml.template
+++ b/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces-pitr.yaml.template
@@ -50,8 +50,8 @@ spec:
externalClusters:
- name: cluster-tablespaces-volume-snapshot
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-tablespaces-volume-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
@@ -61,4 +61,4 @@ spec:
key: ID
secretAccessKey:
name: backup-storage-creds
- key: KEY
\ No newline at end of file
+ key: KEY
diff --git a/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces.yaml.template b/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces.yaml.template
index 4620a16029..7bcb11cdab 100644
--- a/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces.yaml.template
+++ b/tests/e2e/fixtures/tablespaces/cluster-volume-snapshot-tablespaces.yaml.template
@@ -33,8 +33,8 @@ spec:
className: ${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}
snapshotOwnerReference: cluster
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-tablespaces-volume-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/tablespaces/cluster-with-tablespaces.yaml.template b/tests/e2e/fixtures/tablespaces/cluster-with-tablespaces.yaml.template
index 63f40b6308..fd8ceb0216 100644
--- a/tests/e2e/fixtures/tablespaces/cluster-with-tablespaces.yaml.template
+++ b/tests/e2e/fixtures/tablespaces/cluster-with-tablespaces.yaml.template
@@ -52,8 +52,8 @@ spec:
backup:
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-tablespaces/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/upgrade/cluster1.yaml.template b/tests/e2e/fixtures/upgrade/cluster1.yaml.template
index 6e52f297e7..e2677ce439 100644
--- a/tests/e2e/fixtures/upgrade/cluster1.yaml.template
+++ b/tests/e2e/fixtures/upgrade/cluster1.yaml.template
@@ -38,8 +38,12 @@ spec:
backup:
target: primary
barmanObjectStore:
+ serverName: ${SERVER_NAME}
destinationPath: s3://cluster-full-backup/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: aws-creds
@@ -52,7 +56,7 @@ spec:
data:
compression: gzip
immediateCheckpoint: true
- jobs: 2
+ jobs: 4
resources:
requests:
diff --git a/tests/e2e/fixtures/upgrade/cluster2.yaml.template b/tests/e2e/fixtures/upgrade/cluster2.yaml.template
index 7825bd7d0f..f100809da7 100644
--- a/tests/e2e/fixtures/upgrade/cluster2.yaml.template
+++ b/tests/e2e/fixtures/upgrade/cluster2.yaml.template
@@ -38,8 +38,12 @@ spec:
backup:
target: primary
barmanObjectStore:
+ serverName: ${SERVER_NAME}
destinationPath: s3://cluster2-full-backup/
- endpointURL: http://minio-service:9000
+ endpointURL: https://minio-service.minio:9000
+ endpointCA:
+ key: ca.crt
+ name: minio-server-ca-secret
s3Credentials:
accessKeyId:
name: aws-creds
diff --git a/tests/e2e/fixtures/upgrade/scheduled-backup.yaml b/tests/e2e/fixtures/upgrade/scheduled-backup.yaml
index 130e936955..3babe3be25 100644
--- a/tests/e2e/fixtures/upgrade/scheduled-backup.yaml
+++ b/tests/e2e/fixtures/upgrade/scheduled-backup.yaml
@@ -3,7 +3,7 @@ kind: ScheduledBackup
metadata:
name: scheduled-backup
spec:
- schedule: "*/30 * * * * *"
+ schedule: "0 */1 * * * *"
target: primary
cluster:
name: cluster1
diff --git a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-restore.yaml.template b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-restore.yaml.template
index 1ca5e636ef..45362d1f3d 100644
--- a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-restore.yaml.template
+++ b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-restore.yaml.template
@@ -17,7 +17,7 @@ spec:
bootstrap:
recovery:
- source: cluster-pvc-snapshot
+ source: cluster-pvc-hot-snapshot
volumeSnapshots:
storage:
name: ${SNAPSHOT_PITR_PGDATA}
@@ -30,10 +30,10 @@ spec:
externalClusters:
- - name: cluster-pvc-snapshot
+ - name: cluster-pvc-hot-snapshot
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-pvc-hot-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-snapshot.yaml.template b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-snapshot.yaml.template
index a42eb96fa3..851cd08e15 100644
--- a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-snapshot.yaml.template
+++ b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-hot-snapshot.yaml.template
@@ -1,7 +1,7 @@
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
- name: cluster-pvc-snapshot
+ name: cluster-pvc-hot-snapshot
spec:
instances: 2
primaryUpdateStrategy: unsupervised
@@ -23,8 +23,8 @@ spec:
immediateCheckpoint: true
waitForArchive: true
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-pvc-hot-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot-restore.yaml.template b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot-restore.yaml.template
index 0f5c22684d..e79f4b6a11 100644
--- a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot-restore.yaml.template
+++ b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot-restore.yaml.template
@@ -32,8 +32,8 @@ spec:
externalClusters:
- name: cluster-pvc-snapshot
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-pvc-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot.yaml.template b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot.yaml.template
index f9eec67f9f..62895c7904 100644
--- a/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot.yaml.template
+++ b/tests/e2e/fixtures/volume_snapshot/cluster-pvc-snapshot.yaml.template
@@ -18,8 +18,8 @@ spec:
volumeSnapshot:
className: ${E2E_DEFAULT_VOLUMESNAPSHOT_CLASS}
barmanObjectStore:
- destinationPath: s3://cluster-backups/
- endpointURL: https://minio-service:9000
+ destinationPath: s3://cluster-pvc-snapshot/
+ endpointURL: https://minio-service.minio:9000
endpointCA:
key: ca.crt
name: minio-server-ca-secret
diff --git a/tests/e2e/replica_mode_cluster_test.go b/tests/e2e/replica_mode_cluster_test.go
index 7e0f8f8399..6415ce7f9d 100644
--- a/tests/e2e/replica_mode_cluster_test.go
+++ b/tests/e2e/replica_mode_cluster_test.go
@@ -134,17 +134,9 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
By("creating the credentials for minio", func() {
AssertStorageCredentialsAreCreated(replicaNamespace, "backup-storage-creds", "minio", "minio123")
})
- By("setting up minio", func() {
- minio, err := testUtils.MinioDefaultSetup(replicaNamespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, minio, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioDefaultClient(replicaNamespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
+
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, replicaNamespace)
Expect(err).ToNot(HaveOccurred())
})
@@ -203,18 +195,8 @@ var _ = Describe("Replica Mode", Label(tests.LabelReplication), func() {
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
})
- By("setting up minio", func() {
- minio, err := testUtils.MinioDefaultSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, minio, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioDefaultClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
diff --git a/tests/e2e/suite_test.go b/tests/e2e/suite_test.go
index c52f73be63..601ae14a9a 100644
--- a/tests/e2e/suite_test.go
+++ b/tests/e2e/suite_test.go
@@ -64,6 +64,12 @@ var (
operatorLogDumped bool
quickDeletionPeriod = int64(1)
testTimeouts map[utils.Timeout]int
+ minioEnv = &utils.MinioEnv{
+ Namespace: "minio",
+ ServiceName: "minio-service.minio",
+ CaSecretName: "minio-server-ca-secret",
+ TLSSecret: "minio-server-tls-secret",
+ }
)
var _ = SynchronizedBeforeSuite(func() []byte {
@@ -71,40 +77,67 @@ var _ = SynchronizedBeforeSuite(func() []byte {
env, err = utils.NewTestingEnvironment()
Expect(err).ShouldNot(HaveOccurred())
- pod, err := utils.GetPsqlClient(psqlClientNamespace, env)
+ psqlPod, err := utils.GetPsqlClient(psqlClientNamespace, env)
Expect(err).ShouldNot(HaveOccurred())
DeferCleanup(func() {
err := env.DeleteNamespaceAndWait(psqlClientNamespace, 300)
Expect(err).ToNot(HaveOccurred())
})
- // here we serialized psql client pod object info and will be
- // accessible to all nodes (specs)
- psqlPodJSONObj, err := json.Marshal(pod)
+
+ // Set up a global MinIO service on his own namespace
+ err = env.CreateNamespace(minioEnv.Namespace)
+ Expect(err).ToNot(HaveOccurred())
+ DeferCleanup(func() {
+ err := env.DeleteNamespaceAndWait(minioEnv.Namespace, 300)
+ Expect(err).ToNot(HaveOccurred())
+ })
+ minioEnv.Timeout = uint(testTimeouts[utils.MinioInstallation])
+ minioClient, err := utils.MinioDeploy(minioEnv, env)
+ Expect(err).ToNot(HaveOccurred())
+
+ caSecret := minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName)
+ minioEnv.CaSecretObj = *caSecret
+ objs := map[string]corev1.Pod{
+ "psql": *psqlPod,
+ "minio": *minioClient,
+ }
+
+ jsonObjs, err := json.Marshal(objs)
if err != nil {
panic(err)
}
- return psqlPodJSONObj
-}, func(data []byte) {
+
+ return jsonObjs
+}, func(jsonObjs []byte) {
var err error
// We are creating new testing env object again because above testing env can not serialize and
// accessible to all nodes (specs)
if env, err = utils.NewTestingEnvironment(); err != nil {
panic(err)
}
+
_ = k8sscheme.AddToScheme(env.Scheme)
_ = apiv1.AddToScheme(env.Scheme)
+
if testLevelEnv, err = tests.TestLevel(); err != nil {
panic(err)
}
+
if testTimeouts, err = utils.Timeouts(); err != nil {
panic(err)
}
+
if testCloudVendorEnv, err = utils.TestCloudVendor(); err != nil {
panic(err)
}
- if err := json.Unmarshal(data, &psqlClientPod); err != nil {
+
+ var objs map[string]*corev1.Pod
+ if err := json.Unmarshal(jsonObjs, &objs); err != nil {
panic(err)
}
+
+ psqlClientPod = objs["psql"]
+ minioEnv.Client = objs["minio"]
})
var _ = SynchronizedAfterSuite(func() {
diff --git a/tests/e2e/tablespaces_test.go b/tests/e2e/tablespaces_test.go
index d334cdf07b..6a97edfe49 100644
--- a/tests/e2e/tablespaces_test.go
+++ b/tests/e2e/tablespaces_test.go
@@ -33,7 +33,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs"
@@ -53,8 +52,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
const (
level = tests.Medium
namespacePrefix = "tablespaces"
- minioCaSecName = "minio-server-ca-secret"
- minioTLSSecName = "minio-server-tls-secret"
)
var (
clusterName string
@@ -108,41 +105,6 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
}
- minioSetup := func(namespace string) {
- By("creating ca and tls certificate secrets", func() {
- // create CA certificates
- _, caPair, err := testUtils.CreateSecretCA(namespace, clusterName, minioCaSecName, true, env)
- Expect(err).ToNot(HaveOccurred())
-
- // sign and create secret using CA certificate and key
- serverPair, err := caPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
- []string{"minio-service.internal.mydomain.net, minio-service.default.svc, minio-service.default,"},
- )
- Expect(err).ToNot(HaveOccurred())
- serverSecret := serverPair.GenerateCertificateSecret(namespace, minioTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
- Expect(err).ToNot(HaveOccurred())
- })
-
- By("creating the credentials for minio", func() {
- AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
- })
-
- By("setting up minio", func() {
- setup, err := testUtils.MinioSSLSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioSSLClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
- Expect(err).ToNot(HaveOccurred())
- })
- }
Context("on a new cluster with tablespaces", Ordered, func() {
var backupName string
var err error
@@ -162,10 +124,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
// Create a cluster in a namespace we'll delete after the test
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
+
+ // We create the MinIO credentials required to login into the system
+ AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
+
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
DeferCleanup(func() error {
return env.DeleteNamespace(namespace)
})
- minioSetup(namespace)
clusterSetup(clusterManifest)
})
@@ -223,20 +193,29 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
})
By("verifying the number of tars in minio", func() {
- latestBaseBackupContainsExpectedTars(clusterName, namespace, 1, 3)
+ latestBaseBackupContainsExpectedTars(clusterName, 1, 3)
})
By("verifying backup status", func() {
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastSuccessfulBackup, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastFailedBackup, err
}, 30).Should(BeEmpty())
})
@@ -319,20 +298,29 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
eventuallyHasCompletedBackups(namespace, backups)
// in the latest base backup, we expect 4 tars
// (data.tar + 3 tars for each of the 3 tablespaces)
- latestBaseBackupContainsExpectedTars(clusterName, namespace, backups, 4)
+ latestBaseBackupContainsExpectedTars(clusterName, backups, 4)
})
By("verifying backup status", func() {
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.FirstRecoverabilityPoint, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastSuccessfulBackup, err
}, 30).ShouldNot(BeEmpty())
Eventually(func() (string, error) {
cluster, err := env.GetCluster(namespace, clusterName)
+ if err != nil {
+ return "", err
+ }
return cluster.Status.LastFailedBackup, err
}, 30).Should(BeEmpty())
})
@@ -403,11 +391,18 @@ var _ = Describe("Tablespaces tests", Label(tests.LabelTablespaces,
// Create a cluster in a namespace we'll delete after the test
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
+
+ // We create the required credentials for MinIO
+ AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
+
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
+ Expect(err).ToNot(HaveOccurred())
+ })
+
DeferCleanup(func() error {
return env.DeleteNamespace(namespace)
})
-
- minioSetup(namespace)
clusterSetup(clusterManifest)
})
@@ -1216,7 +1211,6 @@ func eventuallyHasCompletedBackups(namespace string, numBackups int) {
func latestBaseBackupContainsExpectedTars(
clusterName string,
- namespace string,
numBackups int,
expectedTars int,
) {
@@ -1224,7 +1218,7 @@ func latestBaseBackupContainsExpectedTars(
// we list the backup.info files to get the listing of base backups
// directories in minio
backupInfoFiles := filepath.Join("*", clusterName, "base", "*", "*.info")
- ls, err := testUtils.ListFilesOnMinio(namespace, minioClientName, backupInfoFiles)
+ ls, err := testUtils.ListFilesOnMinio(minioEnv, backupInfoFiles)
g.Expect(err).ShouldNot(HaveOccurred())
frags := strings.Split(ls, "\n")
slices.Sort(frags)
@@ -1232,10 +1226,10 @@ func latestBaseBackupContainsExpectedTars(
g.Expect(frags).To(HaveLen(numBackups), report)
latestBaseBackup := filepath.Dir(frags[numBackups-1])
tarsInLastBackup := strings.TrimPrefix(filepath.Join(latestBaseBackup, "*.tar"), "minio/")
- listing, err := testUtils.ListFilesOnMinio(namespace, minioClientName, tarsInLastBackup)
+ listing, err := testUtils.ListFilesOnMinio(minioEnv, tarsInLastBackup)
g.Expect(err).ShouldNot(HaveOccurred())
report += fmt.Sprintf("tar listing:\n%s\n", listing)
- numTars, err := testUtils.CountFilesOnMinio(namespace, minioClientName, tarsInLastBackup)
+ numTars, err := testUtils.CountFilesOnMinio(minioEnv, tarsInLastBackup)
g.Expect(err).ShouldNot(HaveOccurred())
g.Expect(numTars).To(Equal(expectedTars), report)
}, 120).Should(Succeed())
diff --git a/tests/e2e/upgrade_test.go b/tests/e2e/upgrade_test.go
index 5ca2979b2c..bdc1204c8c 100644
--- a/tests/e2e/upgrade_test.go
+++ b/tests/e2e/upgrade_test.go
@@ -25,7 +25,6 @@ import (
"time"
"github.com/thoas/go-funk"
- appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
@@ -35,7 +34,6 @@ import (
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
"github.com/cloudnative-pg/cloudnative-pg/pkg/specs"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
testsUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
@@ -86,16 +84,17 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// This is a cluster of the previous version, created before the operator upgrade
clusterName1 = "cluster1"
sampleFile = fixturesDir + "/upgrade/cluster1.yaml.template"
+ minioPath1 = "minio/cluster-full-backup"
// This is a cluster of the previous version, created after the operator upgrade
clusterName2 = "cluster2"
sampleFile2 = fixturesDir + "/upgrade/cluster2.yaml.template"
+ minioPath2 = "minio/cluster2-full-backup"
backupName = "cluster-backup"
backupFile = fixturesDir + "/upgrade/backup1.yaml"
restoreFile = fixturesDir + "/upgrade/cluster-restore.yaml.template"
scheduledBackupFile = fixturesDir + "/upgrade/scheduled-backup.yaml"
- countBackupsScript = "sh -c 'mc find minio --name data.tar.gz | wc -l'"
pgBouncerSampleFile = fixturesDir + "/upgrade/pgbouncer.yaml"
pgBouncerName = "pgbouncer"
@@ -143,30 +142,13 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// Check that the amount of backups is increasing on minio.
// This check relies on the fact that nothing is performing backups
// but a single scheduled backups during the check
- AssertScheduledBackupsAreScheduled := func(upgradeNamespace string) {
+ AssertScheduledBackupsAreScheduled := func(serverName string) {
By("verifying scheduled backups are still happening", func() {
- out, _, err := env.ExecCommandInContainer(
- testsUtils.ContainerLocator{
- Namespace: upgradeNamespace,
- PodName: minioClientName,
- ContainerName: "mc",
- }, nil,
- "sh", "-c", "mc find minio --name data.tar.gz | wc -l")
- Expect(err).ToNot(HaveOccurred())
- currentBackups, err := strconv.Atoi(strings.Trim(out, "\n"))
+ latestTar := minioPath(serverName, "data.tar.gz")
+ currentBackups, err := testsUtils.CountFilesOnMinio(minioEnv, latestTar)
Expect(err).ToNot(HaveOccurred())
Eventually(func() (int, error) {
- out, _, err := env.ExecCommandInContainer(
- testsUtils.ContainerLocator{
- Namespace: upgradeNamespace,
- PodName: minioClientName,
- ContainerName: "mc",
- }, nil,
- "sh", "-c", "mc find minio --name data.tar.gz | wc -l")
- if err != nil {
- return 0, err
- }
- return strconv.Atoi(strings.Trim(out, "\n"))
+ return testsUtils.CountFilesOnMinio(minioEnv, latestTar)
}, 120).Should(BeNumerically(">", currentBackups))
})
}
@@ -315,6 +297,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
GinkgoWriter.Println("cleaning up")
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
+ // Dump the minio namespace when failed
+ env.DumpNamespaceObjects(minioEnv.Namespace, "out/"+CurrentSpecReport().LeafNodeText+"minio.log")
// Dump the operator namespace, as operator is changing too
env.DumpOperator(operatorNamespace,
"out/"+CurrentSpecReport().LeafNodeText+"operator.log")
@@ -324,9 +308,23 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
if err != nil {
return fmt.Errorf("could not cleanup. Failed to delete namespace: %v", err)
}
+
// Delete the operator's namespace in case that the previous test make corrupted changes to
// the operator's namespace so that affects subsequent test
- return env.DeleteNamespaceAndWait(operatorNamespace, 60)
+ if err := env.DeleteNamespaceAndWait(operatorNamespace, 60); err != nil {
+ return fmt.Errorf("could not cleanup, failed to delete operator namespace: %v", err)
+ }
+
+ if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath1); err != nil {
+ return fmt.Errorf("encountered an error while cleaning up minio: %v", err)
+ }
+
+ if _, err := testsUtils.CleanFilesOnMinio(minioEnv, minioPath2); err != nil {
+ return fmt.Errorf("encountered an error while cleaning up minio: %v", err)
+ }
+
+ GinkgoWriter.Println("cleaning up done")
+ return nil
}
assertCreateNamespace := func(namespacePrefix string) string {
@@ -384,6 +382,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
}
assertClustersWorkAfterOperatorUpgrade := func(upgradeNamespace, operatorManifest string) {
+ // generate random serverNames for the clusters each time
+ serverName1 := fmt.Sprintf("%s-%d", clusterName1, funk.RandomInt(0, 9999))
+ serverName2 := fmt.Sprintf("%s-%d", clusterName2, funk.RandomInt(0, 9999))
// Create the secrets used by the clusters and minio
By("creating the postgres secrets", func() {
CreateResourceFromFile(upgradeNamespace, pgSecrets)
@@ -391,52 +392,18 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
By("creating the cloud storage credentials", func() {
AssertStorageCredentialsAreCreated(upgradeNamespace, "aws-creds", "minio", "minio123")
})
-
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, upgradeNamespace)
+ Expect(err).ToNot(HaveOccurred())
+ })
// Create the cluster. Since it will take a while, we'll do more stuff
// in parallel and check for it to be up later.
By(fmt.Sprintf("creating a Cluster in the '%v' upgradeNamespace",
upgradeNamespace), func() {
- CreateResourceFromFile(upgradeNamespace, sampleFile)
- })
-
- By("setting up minio", func() {
- setup, err := testsUtils.MinioDefaultSetup(upgradeNamespace)
+ // set the serverName to a random name
+ err := os.Setenv("SERVER_NAME", serverName1)
Expect(err).ToNot(HaveOccurred())
- err = testsUtils.InstallMinio(env, setup, uint(testTimeouts[testsUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testsUtils.MinioDefaultClient(upgradeNamespace)
- err := testsUtils.PodCreateAndWaitForReady(env, &minioClient, uint(testTimeouts[testsUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- By("having minio resources ready", func() {
- // Wait for the minio pod to be ready
- deploymentName := "minio"
- deploymentNamespacedName := types.NamespacedName{
- Namespace: upgradeNamespace,
- Name: deploymentName,
- }
- Eventually(func() (int32, error) {
- deployment := &appsv1.Deployment{}
- err := env.Client.Get(env.Ctx, deploymentNamespacedName, deployment)
- return deployment.Status.ReadyReplicas, err
- }, 300).Should(BeEquivalentTo(1))
-
- // Wait for the minio client pod to be ready
- mcNamespacedName := types.NamespacedName{
- Namespace: upgradeNamespace,
- Name: minioClientName,
- }
- Eventually(func() (bool, error) {
- mc := &corev1.Pod{}
- err := env.Client.Get(env.Ctx, mcNamespacedName, mc)
- return utils.IsPodReady(*mc), err
- }, 180).Should(BeTrue())
+ CreateResourceFromFile(upgradeNamespace, sampleFile)
})
// Cluster ready happens after minio is ready
@@ -461,36 +428,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
Expect(err).ToNot(HaveOccurred())
})
- // Create a WAL on the primary and check if it arrives on
- // minio within a short time.
- By("archiving WALs on minio", func() {
- primary := clusterName1 + "-1"
- out, _, err := env.ExecCommandInInstancePod(
- testsUtils.PodLocator{
- Namespace: upgradeNamespace,
- PodName: primary,
- }, nil,
- "psql", "-U", "postgres", "appdb", "-v", "SHOW_ALL_RESULTS=off", "-tAc",
- "CHECKPOINT; SELECT pg_walfile_name(pg_switch_wal())")
- Expect(err).ToNot(HaveOccurred())
- latestWAL := strings.TrimSpace(out)
-
- Eventually(func() (int, error, error) {
- // In the fixture WALs are compressed with gzip
- findCmd := fmt.Sprintf(
- "mc find minio --name %v.gz | wc -l",
- latestWAL)
- out, _, err := env.ExecCommandInContainer(
- testsUtils.ContainerLocator{
- Namespace: upgradeNamespace,
- PodName: minioClientName,
- ContainerName: "mc",
- }, nil,
- "sh", "-c", findCmd)
- value, atoiErr := strconv.Atoi(strings.Trim(out, "\n"))
- return value, err, atoiErr
- }, 60).Should(BeEquivalentTo(1))
- })
+ AssertArchiveWalOnMinio(upgradeNamespace, clusterName1, serverName1)
By("uploading a backup on minio", func() {
// We create a Backup
@@ -512,8 +450,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
Eventually(func() (int, error, error) {
out, _, err := env.ExecCommandInContainer(
testsUtils.ContainerLocator{
- Namespace: upgradeNamespace,
- PodName: minioClientName,
+ Namespace: minioEnv.Namespace,
+ PodName: minioEnv.Client.Name,
ContainerName: "mc",
}, nil,
"sh", "-c", "mc find minio --name data.tar.gz | wc -l")
@@ -526,7 +464,7 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
// We create a ScheduledBackup
CreateResourceFromFile(upgradeNamespace, scheduledBackupFile)
})
- AssertScheduledBackupsAreScheduled(upgradeNamespace)
+ AssertScheduledBackupsAreScheduled(serverName1)
assertPGBouncerPodsAreReady(upgradeNamespace, pgBouncerSampleFile, 2)
@@ -598,6 +536,9 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
AssertConfUpgrade(clusterName1, upgradeNamespace)
By("installing a second Cluster on the upgraded operator", func() {
+ // set the serverName to a random name
+ err := os.Setenv("SERVER_NAME", serverName2)
+ Expect(err).ToNot(HaveOccurred())
CreateResourceFromFile(upgradeNamespace, sampleFile2)
AssertClusterIsReady(upgradeNamespace, clusterName2, testTimeouts[testsUtils.ClusterIsReady], env)
})
@@ -649,7 +590,8 @@ var _ = Describe("Upgrade", Label(tests.LabelUpgrade, tests.LabelNoOpenshift), O
return strings.Trim(out, "\n"), err
}, 180).Should(BeEquivalentTo("2"))
})
- AssertScheduledBackupsAreScheduled(upgradeNamespace)
+ AssertArchiveWalOnMinio(upgradeNamespace, clusterName1, serverName1)
+ AssertScheduledBackupsAreScheduled(serverName1)
By("scaling down the pooler to 0", func() {
assertPGBouncerPodsAreReady(upgradeNamespace, pgBouncerSampleFile, 2)
diff --git a/tests/e2e/volume_snapshot_test.go b/tests/e2e/volume_snapshot_test.go
index 86963fc730..a299969657 100644
--- a/tests/e2e/volume_snapshot_test.go
+++ b/tests/e2e/volume_snapshot_test.go
@@ -29,7 +29,6 @@ import (
k8client "sigs.k8s.io/controller-runtime/pkg/client"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
- "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
"github.com/cloudnative-pg/cloudnative-pg/tests"
testUtils "github.com/cloudnative-pg/cloudnative-pg/tests/utils"
@@ -106,6 +105,8 @@ var _ = Describe("Verify Volume Snapshot",
)
Expect(err).ToNot(HaveOccurred())
+ // trigger a checkpoint as the backup may run on standby
+ CheckPointAndSwitchWalOnPrimary(namespace, clusterName)
Eventually(func(g Gomega) {
backupList, err := env.GetBackupList(namespace)
g.Expect(err).ToNot(HaveOccurred())
@@ -146,11 +147,6 @@ var _ = Describe("Verify Volume Snapshot",
snapshotWalEnv = "SNAPSHOT_PITR_PGWAL"
recoveryTargetTimeEnv = "SNAPSHOT_PITR"
)
- // minio constants
- const (
- minioCaSecName = "minio-server-ca-secret"
- minioTLSSecName = "minio-server-tls-secret"
- )
// file constants
const (
clusterToSnapshot = filesDir + "/cluster-pvc-snapshot.yaml.template"
@@ -185,39 +181,12 @@ var _ = Describe("Verify Volume Snapshot",
return env.DeleteNamespace(namespace)
})
- By("creating ca and tls certificate secrets", func() {
- // create CA certificates
- _, caPair, err := testUtils.CreateSecretCA(namespace, clusterToSnapshotName, minioCaSecName, true, env)
- Expect(err).ToNot(HaveOccurred())
-
- // sign and create secret using CA certificate and key
- serverPair, err := caPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
- []string{"minio-service.internal.mydomain.net, minio-service.default.svc, minio-service.default,"},
- )
- Expect(err).ToNot(HaveOccurred())
- serverSecret := serverPair.GenerateCertificateSecret(namespace, minioTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
- By("creating the credentials for minio", func() {
- AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
- })
-
- By("setting up minio", func() {
- setup, err := testUtils.MinioSSLSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioSSLClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
- Expect(err).ToNot(HaveOccurred())
- })
+ AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
})
It("correctly executes PITR with a cold snapshot", func() {
@@ -241,7 +210,7 @@ var _ = Describe("Verify Volume Snapshot",
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123")
+ namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
if err != nil {
return false, err
}
@@ -261,7 +230,8 @@ var _ = Describe("Verify Volume Snapshot",
apiv1.BackupMethodVolumeSnapshot,
env)
Expect(err).ToNot(HaveOccurred())
-
+ // trigger a checkpoint
+ CheckPointAndSwitchWalOnPrimary(namespace, clusterToSnapshotName)
Eventually(func(g Gomega) {
err = env.Client.Get(env.Ctx, types.NamespacedName{
Namespace: namespace,
@@ -551,6 +521,7 @@ var _ = Describe("Verify Volume Snapshot",
Expect(err).NotTo(HaveOccurred())
})
+ CheckPointAndSwitchWalOnPrimary(namespace, clusterToBackupName)
var backup apiv1.Backup
By("waiting the backup to complete", func() {
Eventually(func(g Gomega) {
@@ -596,11 +567,6 @@ var _ = Describe("Verify Volume Snapshot",
snapshotDataEnv = "SNAPSHOT_PITR_PGDATA"
snapshotWalEnv = "SNAPSHOT_PITR_PGWAL"
)
- // minio constants
- const (
- minioCaSecName = "minio-server-ca-secret"
- minioTLSSecName = "minio-server-tls-secret"
- )
// file constants
const (
clusterToSnapshot = filesDir + "/cluster-pvc-hot-snapshot.yaml.template"
@@ -630,39 +596,12 @@ var _ = Describe("Verify Volume Snapshot",
return env.DeleteNamespace(namespace)
})
- By("creating ca and tls certificate secrets", func() {
- // create CA certificates
- _, caPair, err := testUtils.CreateSecretCA(namespace, clusterToSnapshotName, minioCaSecName, true, env)
- Expect(err).ToNot(HaveOccurred())
-
- // sign and create secret using CA certificate and key
- serverPair, err := caPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
- []string{"minio-service.internal.mydomain.net, minio-service.default.svc, minio-service.default,"},
- )
- Expect(err).ToNot(HaveOccurred())
- serverSecret := serverPair.GenerateCertificateSecret(namespace, minioTLSSecName)
- err = env.Client.Create(env.Ctx, serverSecret)
- Expect(err).ToNot(HaveOccurred())
- })
-
- By("creating the credentials for minio", func() {
- AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
- })
-
- By("setting up minio", func() {
- setup, err := testUtils.MinioSSLSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioSSLClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
- Expect(err).ToNot(HaveOccurred())
- })
+ AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
By("creating the cluster to snapshot", func() {
AssertCreateCluster(namespace, clusterToSnapshotName, clusterToSnapshot, env)
@@ -673,7 +612,7 @@ var _ = Describe("Verify Volume Snapshot",
Expect(err).ToNot(HaveOccurred())
Eventually(func() (bool, error) {
connectionStatus, err := testUtils.MinioTestConnectivityUsingBarmanCloudWalArchive(
- namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123")
+ namespace, clusterToSnapshotName, primaryPod.GetName(), "minio", "minio123", minioEnv.ServiceName)
if err != nil {
return false, err
}
diff --git a/tests/e2e/wal_restore_parallel_test.go b/tests/e2e/wal_restore_parallel_test.go
index 9ec50b9980..fd3fe43acd 100644
--- a/tests/e2e/wal_restore_parallel_test.go
+++ b/tests/e2e/wal_restore_parallel_test.go
@@ -78,18 +78,8 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
AssertStorageCredentialsAreCreated(namespace, "backup-storage-creds", "minio", "minio123")
})
- By("setting up minio", func() {
- setup, err := testUtils.MinioDefaultSetup(namespace)
- Expect(err).ToNot(HaveOccurred())
- err = testUtils.InstallMinio(env, setup, uint(testTimeouts[testUtils.MinioInstallation]))
- Expect(err).ToNot(HaveOccurred())
- })
-
- // Create the minio client pod and wait for it to be ready.
- // We'll use it to check if everything is archived correctly
- By("setting up minio client pod", func() {
- minioClient := testUtils.MinioDefaultClient(namespace)
- err := testUtils.PodCreateAndWaitForReady(env, &minioClient, 240)
+ By("create the certificates for MinIO", func() {
+ err := minioEnv.CreateCaSecret(env, namespace)
Expect(err).ToNot(HaveOccurred())
})
@@ -103,6 +93,7 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// Get the standby
podList, err := env.GetClusterPodList(namespace, clusterName)
+ Expect(err).ToNot(HaveOccurred())
for _, po := range podList.Items {
if po.Name != primary {
// Only one standby in this specific testing
@@ -122,7 +113,7 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
latestWALPath := minioPath(clusterName, latestWAL+".gz")
Eventually(func() (int, error) {
// WALs are compressed with gzip in the fixture
- return testUtils.CountFilesOnMinio(namespace, minioClientName, latestWALPath)
+ return testUtils.CountFilesOnMinio(minioEnv, latestWALPath)
}, RetryTimeout).Should(BeEquivalentTo(1),
fmt.Sprintf("verify the existence of WAL %v in minio", latestWALPath))
})
@@ -133,15 +124,15 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
walFile3 = "0000000100000000000000F3"
walFile4 = "0000000100000000000000F4"
walFile5 = "0000000100000000000000F5"
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile1)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile1)).
ShouldNot(HaveOccurred())
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile2)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile2)).
ShouldNot(HaveOccurred())
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile3)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile3)).
ShouldNot(HaveOccurred())
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile4)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile4)).
ShouldNot(HaveOccurred())
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile5)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile5)).
ShouldNot(HaveOccurred())
})
@@ -265,7 +256,7 @@ var _ = Describe("Wal-restore in parallel", Label(tests.LabelBackupRestore), fun
// Generate a new wal file; the archive also contains WAL #6.
By("forging a new wal file, the #6 wal", func() {
walFile6 = "0000000100000000000000F6"
- Expect(testUtils.ForgeArchiveWalOnMinio(namespace, clusterName, minioClientName, latestWAL, walFile6)).
+ Expect(testUtils.ForgeArchiveWalOnMinio(minioEnv.Namespace, clusterName, minioEnv.Client.Name, latestWAL, walFile6)).
ShouldNot(HaveOccurred())
})
diff --git a/tests/utils/backup.go b/tests/utils/backup.go
index 078e19f16c..404d32e227 100644
--- a/tests/utils/backup.go
+++ b/tests/utils/backup.go
@@ -296,7 +296,7 @@ func CreateClusterFromExternalClusterBackupWithPITROnMinio(
Name: sourceClusterName,
BarmanObjectStore: &apiv1.BarmanObjectStoreConfiguration{
DestinationPath: "s3://cluster-backups/",
- EndpointURL: "https://minio-service:9000",
+ EndpointURL: "https://minio-service.minio:9000",
EndpointCA: &apiv1.SecretKeySelector{
LocalObjectReference: apiv1.LocalObjectReference{
Name: "minio-server-ca-secret",
diff --git a/tests/utils/commons.go b/tests/utils/commons.go
index e2c14899e3..d3c77a667e 100644
--- a/tests/utils/commons.go
+++ b/tests/utils/commons.go
@@ -31,7 +31,7 @@ import (
// be a real WAL archive name in an idle postgresql.
func ForgeArchiveWalOnMinio(namespace, clusterName, miniClientPodName, existingWALName, newWALName string) error {
// Forge a WAL archive by copying and renaming the 1st WAL archive
- minioWALBasePath := "minio/cluster-backups/" + clusterName + "/wals/0000000100000000"
+ minioWALBasePath := "minio/" + clusterName + "/" + clusterName + "/wals/0000000100000000"
existingWALPath := minioWALBasePath + "/" + existingWALName + ".gz"
newWALNamePath := minioWALBasePath + "/" + newWALName
forgeWALOnMinioCmd := "mc cp " + existingWALPath + " " + newWALNamePath
diff --git a/tests/utils/minio.go b/tests/utils/minio.go
index e3de1b650f..4a2248d6d7 100644
--- a/tests/utils/minio.go
+++ b/tests/utils/minio.go
@@ -29,10 +29,12 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "github.com/cloudnative-pg/cloudnative-pg/pkg/certs"
"github.com/cloudnative-pg/cloudnative-pg/pkg/postgres"
"github.com/cloudnative-pg/cloudnative-pg/pkg/utils"
)
@@ -42,6 +44,19 @@ const (
minioClientImage = "minio/mc:RELEASE.2022-06-11T21-10-36Z"
)
+// MinioEnv contains all the information related or required by MinIO deployment and
+// used by the functions on every test
+type MinioEnv struct {
+ Client *corev1.Pod
+ CaPair *certs.KeyPair
+ CaSecretObj corev1.Secret
+ ServiceName string
+ Namespace string
+ CaSecretName string
+ TLSSecret string
+ Timeout uint
+}
+
// MinioSetup contains the resources needed for a working minio server deployment:
// a PersistentVolumeClaim, a Deployment and a Service
type MinioSetup struct {
@@ -361,11 +376,11 @@ func MinioDefaultClient(namespace string) corev1.Pod {
Env: []corev1.EnvVar{
{
Name: "MC_HOST_minio",
- Value: "http://minio:minio123@minio-service:9000",
+ Value: "http://minio:minio123@minio-service.minio:9000",
},
{
Name: "MC_URL",
- Value: "https://minio-service:9000",
+ Value: "https://minio-service.minio:9000",
},
{
Name: "HOME",
@@ -435,19 +450,88 @@ func MinioSSLClient(namespace string) corev1.Pod {
MountPath: tlsVolumeMountPath,
},
)
- minioClient.Spec.Containers[0].Env[0].Value = "https://minio:minio123@minio-service:9000"
+ minioClient.Spec.Containers[0].Env[0].Value = "https://minio:minio123@minio-service.minio:9000"
return minioClient
}
+// MinioDeploy will create a full MinIO deployment defined in the minioEnv variable
+func MinioDeploy(minioEnv *MinioEnv, env *TestingEnvironment) (*corev1.Pod, error) {
+ var err error
+ minioEnv.CaPair, err = certs.CreateRootCA(minioEnv.Namespace, "minio")
+ if err != nil {
+ return nil, err
+ }
+
+ minioEnv.CaSecretObj = *minioEnv.CaPair.GenerateCASecret(minioEnv.Namespace, minioEnv.CaSecretName)
+ if _, err = CreateObject(env, &minioEnv.CaSecretObj); err != nil {
+ return nil, err
+ }
+
+ // sign and create secret using CA certificate and key
+ serverPair, err := minioEnv.CaPair.CreateAndSignPair("minio-service", certs.CertTypeServer,
+ []string{"minio.useless.domain.not.verified", "minio-service.minio"},
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ serverSecret := serverPair.GenerateCertificateSecret(minioEnv.Namespace, minioEnv.TLSSecret)
+ if err = env.Client.Create(env.Ctx, serverSecret); err != nil {
+ return nil, err
+ }
+
+ setup, err := MinioSSLSetup(minioEnv.Namespace)
+ if err != nil {
+ return nil, err
+ }
+ if err = InstallMinio(env, setup, minioEnv.Timeout); err != nil {
+ return nil, err
+ }
+
+ minioClient := MinioSSLClient(minioEnv.Namespace)
+
+ return &minioClient, PodCreateAndWaitForReady(env, &minioClient, 240)
+}
+
+func (m *MinioEnv) getCaSecret(env *TestingEnvironment, namespace string) (*corev1.Secret, error) {
+ var certSecret corev1.Secret
+ if err := env.Client.Get(env.Ctx,
+ types.NamespacedName{
+ Namespace: m.Namespace,
+ Name: m.CaSecretName,
+ }, &certSecret); err != nil {
+ return nil, err
+ }
+
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: m.CaSecretName,
+ Namespace: namespace,
+ },
+ Data: certSecret.Data,
+ Type: certSecret.Type,
+ }, nil
+}
+
+// CreateCaSecret creates the certificates required to authenticate against the MinIO service
+func (m *MinioEnv) CreateCaSecret(env *TestingEnvironment, namespace string) error {
+ caSecret, err := m.getCaSecret(env, namespace)
+ if err != nil {
+ return err
+ }
+ _, err = CreateObject(env, caSecret)
+ return err
+}
+
// CountFilesOnMinio uses the minioClient in the given `namespace` to count the
// amount of files matching the given `path`
-func CountFilesOnMinio(namespace string, minioClientName string, path string) (value int, err error) {
+func CountFilesOnMinio(minioEnv *MinioEnv, path string) (value int, err error) {
var stdout string
stdout, _, err = RunUnchecked(fmt.Sprintf(
"kubectl exec -n %v %v -- %v",
- namespace,
- minioClientName,
+ minioEnv.Namespace,
+ minioEnv.Client.Name,
composeFindMinioCmd(path, "minio")))
if err != nil {
return -1, err
@@ -458,12 +542,12 @@ func CountFilesOnMinio(namespace string, minioClientName string, path string) (v
// ListFilesOnMinio uses the minioClient in the given `namespace` to list the
// paths matching the given `path`
-func ListFilesOnMinio(namespace string, minioClientName string, path string) (string, error) {
+func ListFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) {
var stdout string
stdout, _, err := RunUnchecked(fmt.Sprintf(
"kubectl exec -n %v %v -- %v",
- namespace,
- minioClientName,
+ minioEnv.Namespace,
+ minioEnv.Client.Name,
composeListFilesMinio(path, "minio")))
if err != nil {
return "", err
@@ -476,19 +560,24 @@ func composeListFilesMinio(path string, serviceName string) string {
return fmt.Sprintf("sh -c 'mc find %v --path %v'", serviceName, path)
}
+// composeCleanFilesMinio builds the Minio command to remove the files matching a given path
+func composeCleanFilesMinio(path string) string {
+ return fmt.Sprintf("sh -c 'mc rm --force --recursive %v'", path)
+}
+
// composeFindMinioCmd builds the Minio find command
func composeFindMinioCmd(path string, serviceName string) string {
return fmt.Sprintf("sh -c 'mc find %v --path %v | wc -l'", serviceName, path)
}
// GetFileTagsOnMinio will use the minioClient to retrieve the tags in a specified path
-func GetFileTagsOnMinio(namespace, minioClientName, path string) (TagSet, error) {
+func GetFileTagsOnMinio(minioEnv *MinioEnv, path string) (TagSet, error) {
var output TagSet
// Make sure we have a registered backup to access
out, _, err := RunUncheckedRetry(fmt.Sprintf(
"kubectl exec -n %v %v -- sh -c 'mc find minio --name %v | head -n1'",
- namespace,
- minioClientName,
+ minioEnv.Namespace,
+ minioEnv.Client.Name,
path))
if err != nil {
return output, err
@@ -498,8 +587,8 @@ func GetFileTagsOnMinio(namespace, minioClientName, path string) (TagSet, error)
stdout, _, err := RunUncheckedRetry(fmt.Sprintf(
"kubectl exec -n %v %v -- sh -c 'mc --json tag list %v'",
- namespace,
- minioClientName,
+ minioEnv.Namespace,
+ minioEnv.Client.Name,
walFile))
if err != nil {
return output, err
@@ -513,9 +602,14 @@ func GetFileTagsOnMinio(namespace, minioClientName, path string) (TagSet, error)
}
// MinioTestConnectivityUsingBarmanCloudWalArchive returns true if test connection is successful else false
-func MinioTestConnectivityUsingBarmanCloudWalArchive(namespace, clusterName, podName, id, key string) (bool, error) {
- minioSvc := MinioDefaultSVC(namespace)
- minioSvcName := minioSvc.GetName()
+func MinioTestConnectivityUsingBarmanCloudWalArchive(
+ namespace,
+ clusterName,
+ podName,
+ id,
+ key string,
+ minioSvcName string,
+) (bool, error) {
// test connectivity should work with valid sample "000000010000000000000000" wal file
// using barman-cloud-wal-archive script
cmd := fmt.Sprintf("export AWS_CA_BUNDLE=%s;export AWS_ACCESS_KEY_ID=%s;export AWS_SECRET_ACCESS_KEY=%s;"+
@@ -532,3 +626,17 @@ func MinioTestConnectivityUsingBarmanCloudWalArchive(namespace, clusterName, pod
}
return true, nil
}
+
+// CleanFilesOnMinio clean files on minio for a given path
+func CleanFilesOnMinio(minioEnv *MinioEnv, path string) (string, error) {
+ var stdout string
+ stdout, _, err := RunUnchecked(fmt.Sprintf(
+ "kubectl exec -n %v %v -- %v",
+ minioEnv.Namespace,
+ minioEnv.Client.Name,
+ composeCleanFilesMinio(path)))
+ if err != nil {
+ return "", err
+ }
+ return strings.Trim(stdout, "\n"), nil
+}
diff --git a/tests/utils/operator.go b/tests/utils/operator.go
index bcb21228bc..3a81025b87 100644
--- a/tests/utils/operator.go
+++ b/tests/utils/operator.go
@@ -301,3 +301,24 @@ func GetOperatorVersion(namespace, podName string) (string, error) {
ver := versionRegexp.FindStringSubmatch(strings.TrimSpace(out))[1]
return ver, nil
}
+
+// GetOperatorArchitectures returns all the supported operator architectures
+func GetOperatorArchitectures(operatorPod *corev1.Pod) ([]string, error) {
+ out, _, err := RunUnchecked(fmt.Sprintf(
+ "kubectl -n %v exec %v -c manager -- /manager debug show-architectures",
+ operatorPod.Namespace,
+ operatorPod.Name,
+ ))
+ if err != nil {
+ return nil, err
+ }
+
+ // `debug show-architectures` will print a JSON object
+ var res []string
+ err = json.Unmarshal([]byte(out), &res)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, err
+}
diff --git a/tests/utils/pod.go b/tests/utils/pod.go
index 42754a0fe7..5e18a1f1a8 100644
--- a/tests/utils/pod.go
+++ b/tests/utils/pod.go
@@ -192,6 +192,9 @@ func (env TestingEnvironment) ExecCommandInContainer(
if err != nil {
return "", "", wrapErr(err)
}
+ if !pkgutils.IsPodReady(*pod) {
+ return "", "", fmt.Errorf("pod not ready. Namespace: %v, Name: %v", pod.Namespace, pod.Name)
+ }
return env.ExecCommand(env.Ctx, *pod, container.ContainerName, timeout, command...)
}