diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml index a8bbd7866c..847ca8891b 100644 --- a/.github/workflows/spellcheck.yml +++ b/.github/workflows/spellcheck.yml @@ -28,4 +28,4 @@ jobs: uses: actions/checkout@v4 - name: Spellcheck - uses: rojopolis/spellcheck-github-actions@0.42.0 + uses: rojopolis/spellcheck-github-actions@0.43.0 diff --git a/Makefile b/Makefile index d9577c7cd0..3e363b0fd7 100644 --- a/Makefile +++ b/Makefile @@ -42,9 +42,9 @@ LOCALBIN ?= $(shell pwd)/bin BUILD_IMAGE ?= true POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \") KUSTOMIZE_VERSION ?= v5.4.3 -CONTROLLER_TOOLS_VERSION ?= v0.16.3 +CONTROLLER_TOOLS_VERSION ?= v0.16.4 GORELEASER_VERSION ?= v2.3.2 -SPELLCHECK_VERSION ?= 0.42.0 +SPELLCHECK_VERSION ?= 0.43.0 WOKE_VERSION ?= 0.19.0 OPERATOR_SDK_VERSION ?= v1.37.0 OPM_VERSION ?= v1.47.0 diff --git a/api/v1/database_types.go b/api/v1/database_types.go index dd7bd58cf5..243285dcbd 100644 --- a/api/v1/database_types.go +++ b/api/v1/database_types.go @@ -90,6 +90,16 @@ type DatabaseSpec struct { // +optional IcuRules string `json:"icu_rules,omitempty"` + // The BUILTIN_LOCALE (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtin_locale is immutable" + // +optional + BuiltinLocale string `json:"builtin_locale,omitempty"` + + // The COLLATION_VERSION (cannot be changed) + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collation_version is immutable" + // +optional + CollationVersion string `json:"collation_version,omitempty"` + // True when the database is a template // +optional IsTemplate *bool `json:"isTemplate,omitempty"` diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml index 96be8399da..9e1b5295a4 100644 --- a/config/crd/bases/postgresql.cnpg.io_backups.yaml +++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml @@ -3,7 +3,7 @@ 
apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: backups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml index 4581679377..0bbb4455be 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: clusterimagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml index 128d44e47d..d2f810b24e 100644 --- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml +++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: clusters.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml index f49202505e..ea1fbfdba5 100644 --- a/config/crd/bases/postgresql.cnpg.io_databases.yaml +++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: databases.postgresql.cnpg.io spec: group: postgresql.cnpg.io @@ -61,6 +61,12 @@ spec: allowConnections: description: True when connections to 
this database are allowed type: boolean + builtin_locale: + description: The BUILTIN_LOCALE (cannot be changed) + type: string + x-kubernetes-validations: + - message: builtin_locale is immutable + rule: self == oldSelf cluster: description: The corresponding cluster properties: @@ -75,6 +81,12 @@ spec: type: string type: object x-kubernetes-map-type: atomic + collation_version: + description: The COLLATION_VERSION (cannot be changed) + type: string + x-kubernetes-validations: + - message: collation_version is immutable + rule: self == oldSelf connectionLimit: description: |- Connection limit, -1 means no limit and -2 means the diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml index c961bf2eda..1205cd2261 100644 --- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml +++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: imagecatalogs.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml index e09e39b615..59d32f7571 100644 --- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml +++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: poolers.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml index 15fb35c0ba..6c43327c8e 100644 --- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml +++ 
b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.16.3 + controller-gen.kubebuilder.io/version: v0.16.4 name: scheduledbackups.postgresql.cnpg.io spec: group: postgresql.cnpg.io diff --git a/docs/config.yaml b/docs/config.yaml index cc5cde9174..aa77638cf6 100644 --- a/docs/config.yaml +++ b/docs/config.yaml @@ -24,6 +24,10 @@ externalPackages: target: https://pkg.go.dev/time#Duration - match: ^k8s\.io/(api|apimachinery/pkg/apis)/ target: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#{{- lower .TypeIdentifier -}}-{{- arrIndex .PackageSegments -1 -}}-{{- arrIndex .PackageSegments -2 -}} + - match: ^github\.com/cloudnative-pg/machinery + target: https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#{{- .TypeIdentifier }} + - match: ^github\.com/cloudnative-pg/barman-cloud + target: https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api/#{{- .TypeIdentifier }} hideTypePatterns: - "ParseError$" diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md index 52fcb1829a..0b86efc032 100644 --- a/docs/src/cloudnative-pg.v1.md +++ b/docs/src/cloudnative-pg.v1.md @@ -366,7 +366,7 @@ documentation
barmanObjectStore
The configuration for the barman-cloud tool suite
@@ -543,13 +543,13 @@ information that could be needed to correctly restore it.LocalObjectReference
LocalObjectReference
are embedded into this type.)
No description provided.endpointCA
EndpointCA store the CA bundle of the barman endpoint. @@ -575,7 +575,7 @@ errors with certificate issuer and barman-cloud-wal-archive.
cluster
[Required]The cluster to backup
@@ -643,14 +643,14 @@ Overrides the default settings specified in the cluster '.backup.volumeSnapshot.BarmanCredentials
BarmanCredentials
are embedded into this type.)
The potential credentials for each cloud provider
endpointCA
EndpointCA store the CA bundle of the barman endpoint.
@@ -912,7 +912,7 @@ by applications. Defaults to the value of the database
key.
secret
Name of the secret containing the initial credentials for the
@@ -1082,7 +1082,7 @@ by applications. Defaults to the value of the database
key.
secret
Name of the secret containing the initial credentials for the
@@ -1178,7 +1178,7 @@ by applications. Defaults to the value of the database
key.
secret
Name of the secret containing the initial credentials for the @@ -1490,7 +1490,7 @@ Undefined or 0 disable synchronous replication.
superuserSecret
The secret containing the superuser password. If not defined a new
@@ -1517,7 +1517,7 @@ user by setting it to NULL
. Disabled by default.
imagePullSecrets
The list of pull secrets to be used to pull the images
@@ -2356,6 +2356,20 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-The ICU_RULES (cannot be changed)
builtin_locale
The BUILTIN_LOCALE (cannot be changed)
+collation_version
The COLLATION_VERSION (cannot be changed)
+isTemplate
barmanObjectStore
The configuration for the barman-cloud tool suite
@@ -3167,14 +3181,14 @@ Default: false.customQueriesConfigMap
The list of config maps containing the custom queries
customQueriesSecret
The list of secrets containing the custom queries
@@ -3409,7 +3423,7 @@ by pgbouncerauthQuerySecret
The credentials of the user that need to be used for the authentication @@ -3707,7 +3721,7 @@ part for now.
cluster
[Required]This is the cluster reference on which the Pooler will work. @@ -4430,7 +4444,7 @@ Reference: https://www.postgresql.org/docs/current/sql-createrole.html
passwordSecret
Secret containing the password of the role (if present) @@ -4557,14 +4571,14 @@ in their respective arrays.
secretRefs
SecretRefs holds a list of references to Secrets
configMapRefs
ConfigMapRefs holds a list of references to ConfigMaps
@@ -4611,7 +4625,7 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Formatcluster
[Required]The cluster to backup
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md index 01d9af2a29..1a2da0c607 100755 --- a/docs/src/kubectl-plugin.md +++ b/docs/src/kubectl-plugin.md @@ -250,143 +250,95 @@ kubectl cnpg status sandbox ``` ```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3AF/EAFA6168 (Timeline: 8 - WAL File: 00000008000003AF00000075) +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 1m14s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/604DE38 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available -Working WAL archiving: OK -Last Archived WAL: 00000008000003AE00000079 @ 2021-12-14T10:16:29.340047Z -Last Failed WAL: - - -Certificates Status -Certificate Name Expiration Date Days Left Until Expiration ----------------- --------------- -------------------------- -cluster-example-ca 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-replication 2022-05-05 15:02:42 +0000 UTC 87.23 -cluster-example-server 2022-05-05 15:02:42 +0000 UTC 87.23 +Not configured Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3AF/EB0524F0 3AF/EB011760 3AF/EAFEDE50 3AF/EAFEDE50 00:00:00.004461 00:00:00.007901 00:00:00.007901 streaming quorum 1 -sandbox-3 3AF/EB0524F0 3AF/EB030B00 3AF/EB030B00 3AF/EB011760 00:00:00.000977 00:00:00.004194 00:00:00.008252 streaming quorum 1 +Replication Slots Enabled +Name Sent 
LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- +sandbox-2 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active +sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 302 GB 3AF/E9FFFFE0 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 302 GB 3AF/EAFA6168 Primary OK Guaranteed 1.11.0 -sandbox-3 302 GB 3AF/EBAD5D18 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/604DE38 Primary OK BestEffort 1.24.0 k8s-eu-worker +sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 +sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker ``` -You can also get a more verbose version of the status by adding -`--verbose` or just `-v` +If you require more detailed status information, use the `--verbose` option (or +`-v` for short). 
The level of detail increases each time the flag is repeated: ```shell kubectl cnpg status sandbox --verbose ``` ```shell -Cluster in healthy state -Name: sandbox -Namespace: default -System ID: 7039966298120953877 -PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0 -Primary instance: sandbox-2 -Instances: 3 -Ready instances: 3 -Current Write LSN: 3B1/61DE3158 (Timeline: 8 - WAL File: 00000008000003B100000030) - -PostgreSQL Configuration -archive_command = '/controller/manager wal-archive --log-destination /controller/log/postgres.json %p' -archive_mode = 'on' -archive_timeout = '5min' -checkpoint_completion_target = '0.9' -checkpoint_timeout = '900s' -cluster_name = 'sandbox' -dynamic_shared_memory_type = 'sysv' -full_page_writes = 'on' -hot_standby = 'true' -jit = 'on' -listen_addresses = '*' -log_autovacuum_min_duration = '1s' -log_checkpoints = 'on' -log_destination = 'csvlog' -log_directory = '/controller/log' -log_filename = 'postgres' -log_lock_waits = 'on' -log_min_duration_statement = '1000' -log_rotation_age = '0' -log_rotation_size = '0' -log_statement = 'ddl' -log_temp_files = '1024' -log_truncate_on_rotation = 'false' -logging_collector = 'on' -maintenance_work_mem = '2GB' -max_connections = '1000' -max_parallel_workers = '32' -max_replication_slots = '32' -max_wal_size = '15GB' -max_worker_processes = '32' -pg_stat_statements.max = '10000' -pg_stat_statements.track = 'all' -port = '5432' -shared_buffers = '16GB' -shared_memory_type = 'sysv' -shared_preload_libraries = 'pg_stat_statements' -ssl = 'on' -ssl_ca_file = '/controller/certificates/client-ca.crt' -ssl_cert_file = '/controller/certificates/server.crt' -ssl_key_file = '/controller/certificates/server.key' -synchronous_standby_names = 'ANY 1 ("sandbox-1","sandbox-3")' -unix_socket_directories = '/controller/run' -wal_keep_size = '512MB' -wal_level = 'logical' -wal_log_hints = 'on' -cnpg.config_sha256 = '3cfa683e23fe513afaee7c97b50ce0628e0cc634bca8b096517538a9a4428efc' - -PostgreSQL HBA 
Rules - -# Grant local access -local all all peer map=local - -# Require client certificate authentication for the streaming_replica user -hostssl postgres streaming_replica all cert -hostssl replication streaming_replica all cert -hostssl all cnpg_pooler_pgbouncer all cert - -# Otherwise use the default authentication method -host all all all scram-sha-256 - +Cluster Summary +Name: default/sandbox +System ID: 7423474350493388827 +PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4 +Primary instance: sandbox-1 +Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 2m4s) +Status: Cluster in healthy state +Instances: 3 +Ready instances: 3 +Size: 126M +Current Write LSN: 0/6053720 (Timeline: 1 - WAL File: 000000010000000000000006) Continuous Backup status -First Point of Recoverability: Not Available -Working WAL archiving: OK -Last Archived WAL: 00000008000003B00000001D @ 2021-12-14T10:20:42.272815Z -Last Failed WAL: - +Not configured + +Physical backups +No running physical backups found Streaming Replication status -Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority ----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- -sandbox-1 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000333 00:00:00.000333 00:00:00.005484 streaming quorum 1 -sandbox-3 3B1/61E26448 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000756 00:00:00.000756 00:00:00.000756 streaming quorum 1 +Replication Slots Enabled +Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot Slot Restart LSN Slot WAL Status Slot Safe WAL Size +---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- ---------------- --------------- ------------------ +sandbox-2 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 
0/6053720 reserved NULL +sandbox-3 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL + +Unmanaged Replication Slot Status +No unmanaged replication slots found + +Managed roles status +No roles managed + +Tablespaces status +No managed tablespaces + +Pod Disruption Budgets status +Name Role Expected Pods Current Healthy Minimum Desired Healthy Disruptions Allowed +---- ---- ------------- --------------- ----------------------- ------------------- +sandbox replica 2 2 1 1 +sandbox-primary primary 1 1 1 0 Instances status -Name Database Size Current LSN Replication role Status QoS Manager Version ----- ------------- ----------- ---------------- ------ --- --------------- -sandbox-1 3B1/610204B8 Standby (sync) OK Guaranteed 1.11.0 -sandbox-2 3B1/61DE3158 Primary OK Guaranteed 1.11.0 -sandbox-3 3B1/62618470 Standby (sync) OK Guaranteed 1.11.0 +Name Current LSN Replication role Status QoS Manager Version Node +---- ----------- ---------------- ------ --- --------------- ---- +sandbox-1 0/6053720 Primary OK BestEffort 1.24.0 k8s-eu-worker +sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2 +sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker ``` +With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can +also view PostgreSQL configuration, HBA settings, and certificates. + The command also supports output in `yaml` and `json` format. 
### Promote diff --git a/go.mod b/go.mod index dd3d77f58a..c6b261104a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/cheynewallace/tabby v1.1.1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 - github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 + github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-logr/logr v1.4.2 diff --git a/go.sum b/go.sum index b6aa799796..dcf0977370 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1 github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs= github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8= -github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0= -github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= +github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 h1:3pgtSYhv3RDd+51bnlqICNrcVpWQQvriCOvkxtbZpaE= +github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh index 3f77232917..f37c274a8d 
100755 --- a/hack/e2e/run-e2e-kind.sh +++ b/hack/e2e/run-e2e-kind.sh @@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e" export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false} export BUILD_IMAGE=${BUILD_IMAGE:-false} -KIND_NODE_DEFAULT_VERSION=v1.31.0 +KIND_NODE_DEFAULT_VERSION=v1.31.1 export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION} export CLUSTER_ENGINE=kind export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-} diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh index 00ad1bfa30..d8ba973fe7 100755 --- a/hack/setup-cluster.sh +++ b/hack/setup-cluster.sh @@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then fi # Defaults -KIND_NODE_DEFAULT_VERSION=v1.31.0 +KIND_NODE_DEFAULT_VERSION=v1.31.1 K3D_NODE_DEFAULT_VERSION=v1.30.3 CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0 EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0 @@ -299,7 +299,7 @@ install_kubectl() { local binary="${bindir}/kubectl" - curl -sL "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" + curl -sL "https://dl.k8s.io/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}" chmod +x "${binary}" } diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go index ea66ab17f8..b61f967844 100644 --- a/internal/cmd/manager/controller/controller.go +++ b/internal/cmd/manager/controller/controller.go @@ -219,7 +219,7 @@ func RunController( } pluginRepository := repository.New() - if err := pluginRepository.RegisterUnixSocketPluginsInPath( + if _, err := pluginRepository.RegisterUnixSocketPluginsInPath( conf.PluginSocketDir, ); err != nil { setupLog.Error(err, "Unable to load sidecar CNPG-i plugins, skipping") diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go index 616cecefa0..bfd897329d 100644 --- a/internal/cmd/manager/walarchive/cmd.go +++ b/internal/cmd/manager/walarchive/cmd.go @@ -29,6 +29,7 @@ import ( barmanArchiver 
"github.com/cloudnative-pg/barman-cloud/pkg/archiver" "github.com/cloudnative-pg/machinery/pkg/fileutils" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -251,12 +252,20 @@ func archiveWALViaPlugins( contextLogger := log.FromContext(ctx) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { contextLogger.Error(err, "Error while loading local plugins") } defer plugins.Close() - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + availablePluginNamesSet := stringset.From(availablePluginNames) + enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + + client, err := pluginClient.WithPlugins( + ctx, + plugins, + availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., + ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return err diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go index c60c0cf194..50d237ff06 100644 --- a/internal/cmd/manager/walrestore/cmd.go +++ b/internal/cmd/manager/walrestore/cmd.go @@ -29,6 +29,7 @@ import ( barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command" barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer" "github.com/cloudnative-pg/machinery/pkg/log" + "github.com/cloudnative-pg/machinery/pkg/stringset" "github.com/spf13/cobra" apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1" @@ -247,12 +248,20 @@ func restoreWALViaPlugins( contextLogger := log.FromContext(ctx) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + 
availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir) + if err != nil { contextLogger.Error(err, "Error while loading local plugins") } defer plugins.Close() - client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...) + availablePluginNamesSet := stringset.From(availablePluginNames) + enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames()) + + client, err := pluginClient.WithPlugins( + ctx, + plugins, + availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()..., + ) if err != nil { contextLogger.Error(err, "Error while loading required plugins") return err diff --git a/internal/cmd/plugin/pgadmin/pgadmin.go b/internal/cmd/plugin/pgadmin/pgadmin.go index 44a23f89c4..078164a29e 100644 --- a/internal/cmd/plugin/pgadmin/pgadmin.go +++ b/internal/cmd/plugin/pgadmin/pgadmin.go @@ -26,6 +26,7 @@ import ( "github.com/sethvargo/go-password/password" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" @@ -273,6 +274,14 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { Name: pgAdminPassFileVolumeName, MountPath: pgAdminPassFileVolumePath, }, + { + Name: "tmp", + MountPath: "/tmp", + }, + { + Name: "home", + MountPath: "/home/pgadmin", + }, }, ReadinessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -303,6 +312,21 @@ func (cmd *command) generateDeployment() *appsv1.Deployment { }, }, }, + { + Name: "home", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: ptr.To(resource.MustParse("100Mi")), + }, + }, + }, }, }, }, diff --git a/internal/cmd/plugin/status/cmd.go 
b/internal/cmd/plugin/status/cmd.go index 4ca0f70a63..a22594523d 100644 --- a/internal/cmd/plugin/status/cmd.go +++ b/internal/cmd/plugin/status/cmd.go @@ -41,15 +41,15 @@ func NewCmd() *cobra.Command { ctx := cmd.Context() clusterName := args[0] - verbose, _ := cmd.Flags().GetBool("verbose") + verbose, _ := cmd.Flags().GetCount("verbose") output, _ := cmd.Flags().GetString("output") return Status(ctx, clusterName, verbose, plugin.OutputFormat(output)) }, } - statusCmd.Flags().BoolP( - "verbose", "v", false, "Include PostgreSQL configuration, HBA rules, and full replication slots info") + statusCmd.Flags().CountP( + "verbose", "v", "Increase verbosity to display more information") statusCmd.Flags().StringP( "output", "o", "text", "Output format. One of text|json") diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go index 1889975c55..3ae465b96a 100644 --- a/internal/cmd/plugin/status/status.go +++ b/internal/cmd/plugin/status/status.go @@ -99,7 +99,12 @@ func getPrintableIntegerPointer(i *int) string { } // Status implements the "status" subcommand -func Status(ctx context.Context, clusterName string, verbose bool, format plugin.OutputFormat) error { +func Status( + ctx context.Context, + clusterName string, + verbosity int, + format plugin.OutputFormat, +) error { var cluster apiv1.Cluster var errs []error @@ -123,17 +128,19 @@ func Status(ctx context.Context, clusterName string, verbose bool, format plugin status.printHibernationInfo() status.printDemotionTokenInfo() status.printPromotionTokenInfo() - if verbose { + if verbosity > 1 { errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...) 
+ status.printCertificatesStatus() } - status.printCertificatesStatus() status.printBackupStatus() - status.printBasebackupStatus() - status.printReplicaStatus(verbose) - status.printUnmanagedReplicationSlotStatus() - status.printRoleManagerStatus() - status.printTablespacesStatus() - status.printPodDisruptionBudgetStatus() + status.printBasebackupStatus(verbosity) + status.printReplicaStatus(verbosity) + if verbosity > 0 { + status.printUnmanagedReplicationSlotStatus() + status.printRoleManagerStatus() + status.printTablespacesStatus() + status.printPodDisruptionBudgetStatus() + } status.printInstancesStatus() if len(errs) > 0 { @@ -217,10 +224,10 @@ func (fullStatus *PostgresqlStatus) getClusterSize(ctx context.Context, client k return size, nil } -func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client kubernetes.Interface) { +func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClient kubernetes.Interface) { summary := tabby.New() - clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, client) + clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, k8sClient) cluster := fullStatus.Cluster @@ -243,8 +250,7 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client k isPrimaryFenced := cluster.IsInstanceFenced(cluster.Status.CurrentPrimary) primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() - summary.AddLine("Name:", cluster.Name) - summary.AddLine("Namespace:", cluster.Namespace) + summary.AddLine("Name", client.ObjectKeyFromObject(cluster).String()) if primaryInstanceStatus != nil { summary.AddLine("System ID:", primaryInstanceStatus.SystemID) @@ -529,9 +535,9 @@ func (fullStatus *PostgresqlStatus) areReplicationSlotsEnabled() bool { fullStatus.Cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled() } -func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbose bool) { +func (fullStatus *PostgresqlStatus) 
printReplicaStatusTableHeader(table *tabby.Tabby, verbosity int) { switch { - case fullStatus.areReplicationSlotsEnabled() && verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity > 0: table.AddHeader( "Name", "Sent LSN", @@ -549,7 +555,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T "Slot WAL Status", "Slot Safe WAL Size", ) - case fullStatus.areReplicationSlotsEnabled() && !verbose: + case fullStatus.areReplicationSlotsEnabled() && verbosity == 0: table.AddHeader( "Name", "Sent LSN", @@ -585,7 +591,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( applicationName string, columns *[]interface{}, - verbose bool, + verbosity int, ) { printSlotActivity := func(isActive bool) string { if isActive { @@ -595,18 +601,18 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } slot := fullStatus.getPrintableReplicationSlotInfo(applicationName) switch { - case slot != nil && verbose: + case slot != nil && verbosity > 0: *columns = append(*columns, printSlotActivity(slot.Active), slot.RestartLsn, slot.WalStatus, getPrintableIntegerPointer(slot.SafeWalSize), ) - case slot != nil && !verbose: + case slot != nil && verbosity == 0: *columns = append(*columns, printSlotActivity(slot.Active), ) - case slot == nil && verbose: + case slot == nil && verbosity > 0: *columns = append(*columns, "-", "-", @@ -620,7 +626,7 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns( } } -func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { +func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) { if fullStatus.Cluster.IsReplica() { return } @@ -650,13 +656,13 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) { } status := tabby.New() - fullStatus.printReplicaStatusTableHeader(status, verbose) + fullStatus.printReplicaStatusTableHeader(status, verbosity) // print 
Replication Slots columns only if the cluster has replication slots enabled addReplicationSlotsColumns := func(_ string, _ *[]interface{}) {} if fullStatus.areReplicationSlotsEnabled() { addReplicationSlotsColumns = func(applicationName string, columns *[]interface{}) { - fullStatus.addReplicationSlotsColumns(applicationName, columns, verbose) + fullStatus.addReplicationSlotsColumns(applicationName, columns, verbosity) } } @@ -977,7 +983,7 @@ func (fullStatus *PostgresqlStatus) printPodDisruptionBudgetStatus() { fmt.Println() } -func (fullStatus *PostgresqlStatus) printBasebackupStatus() { +func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) { const header = "Physical backups" primaryInstanceStatus := fullStatus.tryGetPrimaryInstance() @@ -988,7 +994,7 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus() { return } - if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 { + if verbosity > 0 && len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 { fmt.Println(aurora.Green(header)) fmt.Println(aurora.Yellow("No running physical backups found").String()) fmt.Println() diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go index 76da6773e8..e43b5f1091 100644 --- a/internal/cnpi/plugin/repository/setup.go +++ b/internal/cnpi/plugin/repository/setup.go @@ -25,6 +25,7 @@ import ( "github.com/cloudnative-pg/machinery/pkg/log" "github.com/jackc/puddle/v2" + "go.uber.org/multierr" "github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection" ) @@ -41,8 +42,9 @@ type Interface interface { RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error // RegisterUnixSocketPluginsInPath scans the passed directory - // for plugins that are deployed with unix sockets - RegisterUnixSocketPluginsInPath(pluginsPath string) error + // for plugins that are deployed with unix sockets. 
+ // Return the list of loaded plugin names + RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) // GetConnection gets a connection to the plugin with specified name GetConnection(ctx context.Context, name string) (connection.Interface, error) @@ -149,30 +151,34 @@ func (r *data) RegisterRemotePlugin(name string, address string, tlsConfig *tls. }) } -func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) error { +func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) { entries, err := os.ReadDir(pluginsPath) if err != nil { // There's no need to complain if the plugin folder doesn't exist if os.IsNotExist(err) { - return nil + return nil, nil } // Otherwise, this means we can't read that folder and // is a real problem - return err + return nil, err } + pluginsNames := make([]string, 0, len(entries)) + var errors error for _, entry := range entries { name := entry.Name() if err := r.registerUnixSocketPlugin( name, path.Join(pluginsPath, name), ); err != nil { - return err + errors = multierr.Append(errors, err) + } else { + pluginsNames = append(pluginsNames, name) } } - return nil + return pluginsNames, errors } // New creates a new plugin repository diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go index de8eaebdf3..b55ac5a659 100644 --- a/internal/management/controller/database_controller_sql.go +++ b/internal/management/controller/database_controller_sql.go @@ -101,6 +101,13 @@ func createDatabase( if obj.Spec.IcuRules != "" { sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize())) } + if obj.Spec.BuiltinLocale != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize())) + } + if obj.Spec.CollationVersion != "" { + sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s", + 
pgx.Identifier{obj.Spec.CollationVersion}.Sanitize()))
+	}
 
 	_, err := db.ExecContext(ctx, sqlCreateDatabase.String())
 	if err != nil {
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index b95a13e076..25a697db47 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -133,6 +133,25 @@ var _ = Describe("Managed Database SQL", func() {
 			err = createDatabase(ctx, db, database)
 			Expect(err).ToNot(HaveOccurred())
 		})
+
+		It("should create a new Database with builtin locale", func(ctx SpecContext) {
+			database.Spec.LocaleProvider = "builtin"
+			database.Spec.BuiltinLocale = "C"
+			database.Spec.CollationVersion = "1.2.3"
+
+			expectedValue := sqlmock.NewResult(0, 1)
+			expectedQuery := fmt.Sprintf(
+				"CREATE DATABASE %s OWNER %s "+
+					"LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s",
+				pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
+				pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(),
+				pgx.Identifier{database.Spec.CollationVersion}.Sanitize(),
+			)
+			dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
+
+			err = createDatabase(ctx, db, database)
+			Expect(err).ToNot(HaveOccurred())
+		})
 	})
 
 	Context("updateDatabase", func() {
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index 0619cff770..d0cc5043eb 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -55,6 +55,16 @@ func NewPluginBackupCommand(
 ) *PluginBackupCommand {
 	backup.EnsureGVKIsPresent()
 
+	logger := log.WithValues(
+		"pluginConfiguration", backup.Spec.PluginConfiguration,
+		"backupName", backup.Name,
+		"backupNamespace", backup.Namespace)
+
+	plugins := repository.New()
+	if _,
err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + logger.Error(err, "Error while discovering plugins") + } + return &PluginBackupCommand{ Cluster: cluster, Backup: backup, @@ -75,7 +85,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) { "backupNamespace", b.Backup.Name) plugins := repository.New() - if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { + if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil { contextLogger.Error(err, "Error while discovering plugins") } defer plugins.Close()