From cf316c9266fbc750d14eecc2844ac8295664b173 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Thu, 10 Oct 2024 15:20:17 +0200
Subject: [PATCH 1/9] docs(fix): add missing links to APIs machinery and
barman-cloud (#5748)
Running `make apidoc` was throwing the following error:
`External link source for`. This is caused by a missing configuration
entry needed to properly resolve the URLs in the documentation. This change
adds the proper URLs for the machinery and barman-cloud APIs (see the sketch
after this patch).
Signed-off-by: Jonathan Gonzalez V.
---
docs/config.yaml | 4 ++++
docs/src/cloudnative-pg.v1.md | 40 +++++++++++++++++------------------
2 files changed, 24 insertions(+), 20 deletions(-)
diff --git a/docs/config.yaml b/docs/config.yaml
index cc5cde9174..aa77638cf6 100644
--- a/docs/config.yaml
+++ b/docs/config.yaml
@@ -24,6 +24,10 @@ externalPackages:
target: https://pkg.go.dev/time#Duration
- match: ^k8s\.io/(api|apimachinery/pkg/apis)/
target: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.28/#{{- lower .TypeIdentifier -}}-{{- arrIndex .PackageSegments -1 -}}-{{- arrIndex .PackageSegments -2 -}}
+ - match: ^github\.com/cloudnative-pg/machinery
+ target: https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#{{- .TypeIdentifier }}
+ - match: ^github\.com/cloudnative-pg/barman-cloud
+ target: https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api/#{{- .TypeIdentifier }}
hideTypePatterns:
- "ParseError$"
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 22eb5d401e..54f24f842d 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -366,7 +366,7 @@ documentation
BarmanCredentials
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanCredentials
|
(Members of BarmanCredentials are embedded into this type.)
The potential credentials for each cloud provider
|
endpointCA
-github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
+github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
|
EndpointCA store the CA bundle of the barman endpoint.
@@ -912,7 +912,7 @@ by applications. Defaults to the value of the database key.
|
secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
Name of the secret containing the initial credentials for the
@@ -1082,7 +1082,7 @@ by applications. Defaults to the value of the database key.
|
secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
Name of the secret containing the initial credentials for the
@@ -1178,7 +1178,7 @@ by applications. Defaults to the value of the database key.
|
secret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
Name of the secret containing the initial credentials for the
@@ -1490,7 +1490,7 @@ Undefined or 0 disable synchronous replication.
|
superuserSecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
The secret containing the superuser password. If not defined a new
@@ -1517,7 +1517,7 @@ user by setting it to NULL . Disabled by default.
|
imagePullSecrets
-[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+[]github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
The list of pull secrets to be used to pull the images
@@ -2577,7 +2577,7 @@ secure and efficient password management for external clusters.
|
barmanObjectStore
-github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
+github.com/cloudnative-pg/barman-cloud/pkg/api.BarmanObjectStoreConfiguration
|
The configuration for the barman-cloud tool suite
@@ -3167,14 +3167,14 @@ Default: false.
|
customQueriesConfigMap
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector
+[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector
|
The list of config maps containing the custom queries
|
customQueriesSecret
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
+[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
|
The list of secrets containing the custom queries
@@ -3409,7 +3409,7 @@ by pgbouncer
|
authQuerySecret
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
The credentials of the user that need to be used for the authentication
@@ -3707,7 +3707,7 @@ part for now.
Field | Description |
|
secretRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
+[]github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector
|
SecretRefs holds a list of references to Secrets
|
configMapRefs
-[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector
+[]github.com/cloudnative-pg/machinery/pkg/api.ConfigMapKeySelector
|
ConfigMapRefs holds a list of references to ConfigMaps
@@ -4386,7 +4386,7 @@ see https://pkg.go.dev/github.com/robfig/cron#hdr-CRON_Expression_Format
|
cluster [Required]
-github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
+github.com/cloudnative-pg/machinery/pkg/api.LocalObjectReference
|
The cluster to backup
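For context on how these `externalPackages` rules are applied, here is a minimal Go sketch of the match/target resolution that patch 1/9 configures. The `rule` type, the `resolve` helper, and the way the type path is split are illustrative assumptions, not the apidoc generator's actual implementation; only the two regular expressions and the target URLs come from `docs/config.yaml`.

```go
// Sketch: resolve a fully qualified Go type reference to a pkg.go.dev link,
// mirroring the two externalPackages rules added in docs/config.yaml.
package main

import (
	"fmt"
	"regexp"
	"strings"
)

type rule struct {
	match  *regexp.Regexp
	target func(typeIdentifier string) string
}

var rules = []rule{
	{
		match: regexp.MustCompile(`^github\.com/cloudnative-pg/machinery`),
		target: func(t string) string {
			return "https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#" + t
		},
	},
	{
		match: regexp.MustCompile(`^github\.com/cloudnative-pg/barman-cloud`),
		target: func(t string) string {
			return "https://pkg.go.dev/github.com/cloudnative-pg/barman-cloud/pkg/api/#" + t
		},
	},
}

// resolve splits "package/path.TypeName" and applies the first matching rule.
func resolve(fullType string) (string, bool) {
	idx := strings.LastIndex(fullType, ".")
	if idx < 0 {
		return "", false
	}
	pkg, typeIdentifier := fullType[:idx], fullType[idx+1:]
	for _, r := range rules {
		if r.match.MatchString(pkg) {
			return r.target(typeIdentifier), true
		}
	}
	return "", false
}

func main() {
	url, _ := resolve("github.com/cloudnative-pg/machinery/pkg/api.SecretKeySelector")
	fmt.Println(url)
	// https://pkg.go.dev/github.com/cloudnative-pg/machinery/pkg/api/#SecretKeySelector
}
```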
From 6ae24ac9435c892a8738c78cc85a6aeb9bb87086 Mon Sep 17 00:00:00 2001
From: Jaime Silvela
Date: Thu, 10 Oct 2024 16:24:11 +0200
Subject: [PATCH 2/9] feat(database): add support for PG17 builtin_locale
(#5745)
Closes #5709
Signed-off-by: Jaime Silvela
---
api/v1/database_types.go | 10 ++++++++++
.../bases/postgresql.cnpg.io_databases.yaml | 12 ++++++++++++
docs/src/cloudnative-pg.v1.md | 14 ++++++++++++++
.../controller/database_controller_sql.go | 7 +++++++
.../database_controller_sql_test.go | 19 +++++++++++++++++++
5 files changed, 62 insertions(+)
diff --git a/api/v1/database_types.go b/api/v1/database_types.go
index dd7bd58cf5..243285dcbd 100644
--- a/api/v1/database_types.go
+++ b/api/v1/database_types.go
@@ -90,6 +90,16 @@ type DatabaseSpec struct {
// +optional
IcuRules string `json:"icu_rules,omitempty"`
+ // The BUILTIN_LOCALE (cannot be changed)
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="builtin_locale is immutable"
+ // +optional
+ BuiltinLocale string `json:"builtin_locale,omitempty"`
+
+ // The COLLATION_VERSION (cannot be changed)
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="collation_version is immutable"
+ // +optional
+ CollationVersion string `json:"collation_version,omitempty"`
+
// True when the database is a template
// +optional
IsTemplate *bool `json:"isTemplate,omitempty"`
diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml
index f49202505e..7c29850d0d 100644
--- a/config/crd/bases/postgresql.cnpg.io_databases.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml
@@ -61,6 +61,12 @@ spec:
allowConnections:
description: True when connections to this database are allowed
type: boolean
+ builtin_locale:
+ description: The BUILTIN_LOCALE (cannot be changed)
+ type: string
+ x-kubernetes-validations:
+ - message: builtin_locale is immutable
+ rule: self == oldSelf
cluster:
description: The corresponding cluster
properties:
@@ -75,6 +81,12 @@ spec:
type: string
type: object
x-kubernetes-map-type: atomic
+ collation_version:
+ description: The COLLATION_VERSION (cannot be changed)
+ type: string
+ x-kubernetes-validations:
+ - message: collation_version is immutable
+ rule: self == oldSelf
connectionLimit:
description: |-
Connection limit, -1 means no limit and -2 means the
diff --git a/docs/src/cloudnative-pg.v1.md b/docs/src/cloudnative-pg.v1.md
index 54f24f842d..ba0d311131 100644
--- a/docs/src/cloudnative-pg.v1.md
+++ b/docs/src/cloudnative-pg.v1.md
@@ -2356,6 +2356,20 @@ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-
The ICU_RULES (cannot be changed)
|
+builtin_locale
+string
+ |
+
+ The BUILTIN_LOCALE (cannot be changed)
+ |
+
+collation_version
+string
+ |
+
+ The COLLATION_VERSION (cannot be changed)
+ |
+
isTemplate
bool
|
diff --git a/internal/management/controller/database_controller_sql.go b/internal/management/controller/database_controller_sql.go
index de8eaebdf3..b55ac5a659 100644
--- a/internal/management/controller/database_controller_sql.go
+++ b/internal/management/controller/database_controller_sql.go
@@ -101,6 +101,13 @@ func createDatabase(
if obj.Spec.IcuRules != "" {
sqlCreateDatabase.WriteString(fmt.Sprintf(" ICU_RULES %s", pgx.Identifier{obj.Spec.IcuRules}.Sanitize()))
}
+ if obj.Spec.BuiltinLocale != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" BUILTIN_LOCALE %s", pgx.Identifier{obj.Spec.BuiltinLocale}.Sanitize()))
+ }
+ if obj.Spec.CollationVersion != "" {
+ sqlCreateDatabase.WriteString(fmt.Sprintf(" COLLATION_VERSION %s",
+ pgx.Identifier{obj.Spec.CollationVersion}.Sanitize()))
+ }
_, err := db.ExecContext(ctx, sqlCreateDatabase.String())
if err != nil {
diff --git a/internal/management/controller/database_controller_sql_test.go b/internal/management/controller/database_controller_sql_test.go
index b95a13e076..25a697db47 100644
--- a/internal/management/controller/database_controller_sql_test.go
+++ b/internal/management/controller/database_controller_sql_test.go
@@ -133,6 +133,25 @@ var _ = Describe("Managed Database SQL", func() {
err = createDatabase(ctx, db, database)
Expect(err).ToNot(HaveOccurred())
})
+
+ It("should create a new Database with builtin locale", func(ctx SpecContext) {
+ database.Spec.LocaleProvider = "builtin"
+ database.Spec.BuiltinLocale = "C"
+ database.Spec.CollationVersion = "1.2.3"
+
+ expectedValue := sqlmock.NewResult(0, 1)
+ expectedQuery := fmt.Sprintf(
+ "CREATE DATABASE %s OWNER %s "+
+ "LOCALE_PROVIDER %s BUILTIN_LOCALE %s COLLATION_VERSION %s",
+ pgx.Identifier{database.Spec.Name}.Sanitize(), pgx.Identifier{database.Spec.Owner}.Sanitize(),
+ pgx.Identifier{database.Spec.LocaleProvider}.Sanitize(), pgx.Identifier{database.Spec.BuiltinLocale}.Sanitize(),
+ pgx.Identifier{database.Spec.CollationVersion}.Sanitize(),
+ )
+ dbMock.ExpectExec(expectedQuery).WillReturnResult(expectedValue)
+
+ err = createDatabase(ctx, db, database)
+ Expect(err).ToNot(HaveOccurred())
+ })
})
Context("updateDatabase", func() {
From 19f941b755a2390b2731c641d915ddff666bf306 Mon Sep 17 00:00:00 2001
From: Gabriele Fedi <91485518+GabriFedi97@users.noreply.github.com>
Date: Thu, 10 Oct 2024 17:07:20 +0200
Subject: [PATCH 3/9] fix(cnpg-i): ensure instance manager invokes only the
available plugins (#5651)
The instance manager should try to load only the available plugins, as some
of the plugins declared in the Cluster spec might be available only to the
operator (see the sketch after this patch).
Closes #5648
---------
Signed-off-by: Gabriele Fedi
Signed-off-by: Leonardo Cecchi
Signed-off-by: Armando Ruocco
Co-authored-by: Leonardo Cecchi
Co-authored-by: Armando Ruocco
---
go.mod | 2 +-
go.sum | 4 ++--
internal/cmd/manager/controller/controller.go | 2 +-
internal/cmd/manager/walarchive/cmd.go | 13 ++++++++++--
internal/cmd/manager/walrestore/cmd.go | 13 ++++++++++--
internal/cnpi/plugin/repository/setup.go | 20 ++++++++++++-------
.../postgres/webserver/plugin_backup.go | 12 ++++++++++-
7 files changed, 50 insertions(+), 16 deletions(-)
diff --git a/go.mod b/go.mod
index dd3d77f58a..c6b261104a 100644
--- a/go.mod
+++ b/go.mod
@@ -12,7 +12,7 @@ require (
github.com/cheynewallace/tabby v1.1.1
github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a
github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50
- github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392
+ github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/evanphx/json-patch/v5 v5.9.0
github.com/go-logr/logr v1.4.2
diff --git a/go.sum b/go.sum
index b6aa799796..dcf0977370 100644
--- a/go.sum
+++ b/go.sum
@@ -22,8 +22,8 @@ github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a h1:0v1
github.com/cloudnative-pg/barman-cloud v0.0.0-20240924124724-92831d48562a/go.mod h1:Jm0tOp5oB7utpt8wz6RfSv31h1mThOtffjfyxVupriE=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50 h1:Rm/bbC0GNCuWth5fHVMos99RzNczbWRVBdjubh3JMPs=
github.com/cloudnative-pg/cnpg-i v0.0.0-20241001103001-7e24b2eccd50/go.mod h1:lTWPq8pluS0PSnRMwt0zShftbyssoRhTJ5zAip8unl8=
-github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392 h1:DHaSe0PoLnIQFWIpRqB9RiBlNzbdLuVbiCtc9tN+FL0=
-github.com/cloudnative-pg/machinery v0.0.0-20241007093555-1e197af1f392/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM=
+github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72 h1:3pgtSYhv3RDd+51bnlqICNrcVpWQQvriCOvkxtbZpaE=
+github.com/cloudnative-pg/machinery v0.0.0-20241010122207-5ac7af31ef72/go.mod h1:bWp1Es5zlxElg4Z/c5f0RKOkDcyNvDHdYIvNcPQU4WM=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
diff --git a/internal/cmd/manager/controller/controller.go b/internal/cmd/manager/controller/controller.go
index ea66ab17f8..b61f967844 100644
--- a/internal/cmd/manager/controller/controller.go
+++ b/internal/cmd/manager/controller/controller.go
@@ -219,7 +219,7 @@ func RunController(
}
pluginRepository := repository.New()
- if err := pluginRepository.RegisterUnixSocketPluginsInPath(
+ if _, err := pluginRepository.RegisterUnixSocketPluginsInPath(
conf.PluginSocketDir,
); err != nil {
setupLog.Error(err, "Unable to load sidecar CNPG-i plugins, skipping")
diff --git a/internal/cmd/manager/walarchive/cmd.go b/internal/cmd/manager/walarchive/cmd.go
index 616cecefa0..bfd897329d 100644
--- a/internal/cmd/manager/walarchive/cmd.go
+++ b/internal/cmd/manager/walarchive/cmd.go
@@ -29,6 +29,7 @@ import (
barmanArchiver "github.com/cloudnative-pg/barman-cloud/pkg/archiver"
"github.com/cloudnative-pg/machinery/pkg/fileutils"
"github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -251,12 +252,20 @@ func archiveWALViaPlugins(
contextLogger := log.FromContext(ctx)
plugins := repository.New()
- if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil {
+ availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
+ if err != nil {
contextLogger.Error(err, "Error while loading local plugins")
}
defer plugins.Close()
- client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...)
+ availablePluginNamesSet := stringset.From(availablePluginNames)
+ enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+
+ client, err := pluginClient.WithPlugins(
+ ctx,
+ plugins,
+ availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()...,
+ )
if err != nil {
contextLogger.Error(err, "Error while loading required plugins")
return err
diff --git a/internal/cmd/manager/walrestore/cmd.go b/internal/cmd/manager/walrestore/cmd.go
index c60c0cf194..50d237ff06 100644
--- a/internal/cmd/manager/walrestore/cmd.go
+++ b/internal/cmd/manager/walrestore/cmd.go
@@ -29,6 +29,7 @@ import (
barmanCommand "github.com/cloudnative-pg/barman-cloud/pkg/command"
barmanRestorer "github.com/cloudnative-pg/barman-cloud/pkg/restorer"
"github.com/cloudnative-pg/machinery/pkg/log"
+ "github.com/cloudnative-pg/machinery/pkg/stringset"
"github.com/spf13/cobra"
apiv1 "github.com/cloudnative-pg/cloudnative-pg/api/v1"
@@ -247,12 +248,20 @@ func restoreWALViaPlugins(
contextLogger := log.FromContext(ctx)
plugins := repository.New()
- if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil {
+ availablePluginNames, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir)
+ if err != nil {
contextLogger.Error(err, "Error while loading local plugins")
}
defer plugins.Close()
- client, err := pluginClient.WithPlugins(ctx, plugins, cluster.Spec.Plugins.GetEnabledPluginNames()...)
+ availablePluginNamesSet := stringset.From(availablePluginNames)
+ enabledPluginNamesSet := stringset.From(cluster.Spec.Plugins.GetEnabledPluginNames())
+
+ client, err := pluginClient.WithPlugins(
+ ctx,
+ plugins,
+ availablePluginNamesSet.Intersect(enabledPluginNamesSet).ToList()...,
+ )
if err != nil {
contextLogger.Error(err, "Error while loading required plugins")
return err
diff --git a/internal/cnpi/plugin/repository/setup.go b/internal/cnpi/plugin/repository/setup.go
index 76da6773e8..e43b5f1091 100644
--- a/internal/cnpi/plugin/repository/setup.go
+++ b/internal/cnpi/plugin/repository/setup.go
@@ -25,6 +25,7 @@ import (
"github.com/cloudnative-pg/machinery/pkg/log"
"github.com/jackc/puddle/v2"
+ "go.uber.org/multierr"
"github.com/cloudnative-pg/cloudnative-pg/internal/cnpi/plugin/connection"
)
@@ -41,8 +42,9 @@ type Interface interface {
RegisterRemotePlugin(name string, address string, tlsConfig *tls.Config) error
// RegisterUnixSocketPluginsInPath scans the passed directory
- // for plugins that are deployed with unix sockets
- RegisterUnixSocketPluginsInPath(pluginsPath string) error
+ // for plugins that are deployed with unix sockets.
+ // Return the list of loaded plugin names
+ RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error)
// GetConnection gets a connection to the plugin with specified name
GetConnection(ctx context.Context, name string) (connection.Interface, error)
@@ -149,30 +151,34 @@ func (r *data) RegisterRemotePlugin(name string, address string, tlsConfig *tls.
})
}
-func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) error {
+func (r *data) RegisterUnixSocketPluginsInPath(pluginsPath string) ([]string, error) {
entries, err := os.ReadDir(pluginsPath)
if err != nil {
// There's no need to complain if the plugin folder doesn't exist
if os.IsNotExist(err) {
- return nil
+ return nil, nil
}
// Otherwise, this means we can't read that folder and
// is a real problem
- return err
+ return nil, err
}
+ pluginsNames := make([]string, 0, len(entries))
+ var errors error
for _, entry := range entries {
name := entry.Name()
if err := r.registerUnixSocketPlugin(
name,
path.Join(pluginsPath, name),
); err != nil {
- return err
+ errors = multierr.Append(errors, err)
+ } else {
+ pluginsNames = append(pluginsNames, name)
}
}
- return nil
+ return pluginsNames, errors
}
// New creates a new plugin repository
diff --git a/pkg/management/postgres/webserver/plugin_backup.go b/pkg/management/postgres/webserver/plugin_backup.go
index 0619cff770..d0cc5043eb 100644
--- a/pkg/management/postgres/webserver/plugin_backup.go
+++ b/pkg/management/postgres/webserver/plugin_backup.go
@@ -55,6 +55,16 @@ func NewPluginBackupCommand(
) *PluginBackupCommand {
backup.EnsureGVKIsPresent()
+ logger := log.WithValues(
+ "pluginConfiguration", backup.Spec.PluginConfiguration,
+ "backupName", backup.Name,
+ "backupNamespace", backup.Name)
+
+ plugins := repository.New()
+ if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil {
+ logger.Error(err, "Error while discovering plugins")
+ }
+
return &PluginBackupCommand{
Cluster: cluster,
Backup: backup,
@@ -75,7 +85,7 @@ func (b *PluginBackupCommand) invokeStart(ctx context.Context) {
"backupNamespace", b.Backup.Name)
plugins := repository.New()
- if err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil {
+ if _, err := plugins.RegisterUnixSocketPluginsInPath(configuration.Current.PluginSocketDir); err != nil {
contextLogger.Error(err, "Error while discovering plugins")
}
defer plugins.Close()
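The core of patch 3/9 is the filtering step now shared by `wal-archive` and `wal-restore`: intersect the plugins actually discovered in the socket directory with the ones enabled in the Cluster spec. A minimal sketch, reusing the `stringset` calls from the hunks above and assuming illustrative plugin names for the inputs:

```go
// Sketch: keep only the plugins that are both locally available and enabled
// in the Cluster spec, so the instance manager no longer tries to invoke
// operator-only plugins.
package main

import (
	"fmt"

	"github.com/cloudnative-pg/machinery/pkg/stringset"
)

// pluginsToLoad takes the names returned by RegisterUnixSocketPluginsInPath
// and the names from GetEnabledPluginNames, and returns their intersection.
func pluginsToLoad(availablePluginNames, enabledPluginNames []string) []string {
	availableSet := stringset.From(availablePluginNames)
	enabledSet := stringset.From(enabledPluginNames)
	return availableSet.Intersect(enabledSet).ToList()
}

func main() {
	available := []string{"barman-cloud.cloudnative-pg.io"}
	enabled := []string{"barman-cloud.cloudnative-pg.io", "operator-only-plugin"}
	fmt.Println(pluginsToLoad(available, enabled))
	// [barman-cloud.cloudnative-pg.io]
}
```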
From 68e6b79e32257f81b2b34f74dc88e4d58803bccb Mon Sep 17 00:00:00 2001
From: Gabriele Bartolini
Date: Thu, 10 Oct 2024 20:16:23 +0200
Subject: [PATCH 4/9] feat(plugin): compact output of `status` command (#5765)
Make `--verbose` a countable option and introduce multiple levels of
verbosity. The following panels have been moved to verbosity level 1:
- Physical backups
- Unmanaged Replication Slot Status
- Managed roles status
- Tablespaces status
- Pod Disruption Budgets status
The following panels have been moved to verbosity level 2:
- PostgreSQL configuration
- PostgreSQL HBA
- Certificates Status
Physical base backups are still displayed at verbosity 0 when they are in
progress (see the sketch after this patch).
Closes #5757
Signed-off-by: Gabriele Bartolini
Signed-off-by: Leonardo Cecchi
Signed-off-by: Jaime Silvela
Co-authored-by: Leonardo Cecchi
Co-authored-by: Jaime Silvela
---
docs/src/kubectl-plugin.md | 182 ++++++++++-----------------
internal/cmd/plugin/status/cmd.go | 6 +-
internal/cmd/plugin/status/status.go | 56 +++++----
3 files changed, 101 insertions(+), 143 deletions(-)
diff --git a/docs/src/kubectl-plugin.md b/docs/src/kubectl-plugin.md
index 01d9af2a29..1a2da0c607 100755
--- a/docs/src/kubectl-plugin.md
+++ b/docs/src/kubectl-plugin.md
@@ -250,143 +250,95 @@ kubectl cnpg status sandbox
```
```shell
-Cluster in healthy state
-Name: sandbox
-Namespace: default
-System ID: 7039966298120953877
-PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0
-Primary instance: sandbox-2
-Instances: 3
-Ready instances: 3
-Current Write LSN: 3AF/EAFA6168 (Timeline: 8 - WAL File: 00000008000003AF00000075)
+Cluster Summary
+Name: default/sandbox
+System ID: 7423474350493388827
+PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4
+Primary instance: sandbox-1
+Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 1m14s)
+Status: Cluster in healthy state
+Instances: 3
+Ready instances: 3
+Size: 126M
+Current Write LSN: 0/604DE38 (Timeline: 1 - WAL File: 000000010000000000000006)
Continuous Backup status
-First Point of Recoverability: Not Available
-Working WAL archiving: OK
-Last Archived WAL: 00000008000003AE00000079 @ 2021-12-14T10:16:29.340047Z
-Last Failed WAL: -
-
-Certificates Status
-Certificate Name Expiration Date Days Left Until Expiration
----------------- --------------- --------------------------
-cluster-example-ca 2022-05-05 15:02:42 +0000 UTC 87.23
-cluster-example-replication 2022-05-05 15:02:42 +0000 UTC 87.23
-cluster-example-server 2022-05-05 15:02:42 +0000 UTC 87.23
+Not configured
Streaming Replication status
-Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority
----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- -------------
-sandbox-1 3AF/EB0524F0 3AF/EB011760 3AF/EAFEDE50 3AF/EAFEDE50 00:00:00.004461 00:00:00.007901 00:00:00.007901 streaming quorum 1
-sandbox-3 3AF/EB0524F0 3AF/EB030B00 3AF/EB030B00 3AF/EB011760 00:00:00.000977 00:00:00.004194 00:00:00.008252 streaming quorum 1
+Replication Slots Enabled
+Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot
+---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ----------------
+sandbox-2 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active
+sandbox-3 0/604DE38 0/604DE38 0/604DE38 0/604DE38 00:00:00 00:00:00 00:00:00 streaming async 0 active
Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version
----- ------------- ----------- ---------------- ------ --- ---------------
-sandbox-1 302 GB 3AF/E9FFFFE0 Standby (sync) OK Guaranteed 1.11.0
-sandbox-2 302 GB 3AF/EAFA6168 Primary OK Guaranteed 1.11.0
-sandbox-3 302 GB 3AF/EBAD5D18 Standby (sync) OK Guaranteed 1.11.0
+Name Current LSN Replication role Status QoS Manager Version Node
+---- ----------- ---------------- ------ --- --------------- ----
+sandbox-1 0/604DE38 Primary OK BestEffort 1.24.0 k8s-eu-worker
+sandbox-2 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2
+sandbox-3 0/604DE38 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker
```
-You can also get a more verbose version of the status by adding
-`--verbose` or just `-v`
+If you require more detailed status information, use the `--verbose` option (or
+`-v` for short). The level of detail increases each time the flag is repeated:
```shell
kubectl cnpg status sandbox --verbose
```
```shell
-Cluster in healthy state
-Name: sandbox
-Namespace: default
-System ID: 7039966298120953877
-PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:17.0
-Primary instance: sandbox-2
-Instances: 3
-Ready instances: 3
-Current Write LSN: 3B1/61DE3158 (Timeline: 8 - WAL File: 00000008000003B100000030)
-
-PostgreSQL Configuration
-archive_command = '/controller/manager wal-archive --log-destination /controller/log/postgres.json %p'
-archive_mode = 'on'
-archive_timeout = '5min'
-checkpoint_completion_target = '0.9'
-checkpoint_timeout = '900s'
-cluster_name = 'sandbox'
-dynamic_shared_memory_type = 'sysv'
-full_page_writes = 'on'
-hot_standby = 'true'
-jit = 'on'
-listen_addresses = '*'
-log_autovacuum_min_duration = '1s'
-log_checkpoints = 'on'
-log_destination = 'csvlog'
-log_directory = '/controller/log'
-log_filename = 'postgres'
-log_lock_waits = 'on'
-log_min_duration_statement = '1000'
-log_rotation_age = '0'
-log_rotation_size = '0'
-log_statement = 'ddl'
-log_temp_files = '1024'
-log_truncate_on_rotation = 'false'
-logging_collector = 'on'
-maintenance_work_mem = '2GB'
-max_connections = '1000'
-max_parallel_workers = '32'
-max_replication_slots = '32'
-max_wal_size = '15GB'
-max_worker_processes = '32'
-pg_stat_statements.max = '10000'
-pg_stat_statements.track = 'all'
-port = '5432'
-shared_buffers = '16GB'
-shared_memory_type = 'sysv'
-shared_preload_libraries = 'pg_stat_statements'
-ssl = 'on'
-ssl_ca_file = '/controller/certificates/client-ca.crt'
-ssl_cert_file = '/controller/certificates/server.crt'
-ssl_key_file = '/controller/certificates/server.key'
-synchronous_standby_names = 'ANY 1 ("sandbox-1","sandbox-3")'
-unix_socket_directories = '/controller/run'
-wal_keep_size = '512MB'
-wal_level = 'logical'
-wal_log_hints = 'on'
-cnpg.config_sha256 = '3cfa683e23fe513afaee7c97b50ce0628e0cc634bca8b096517538a9a4428efc'
-
-PostgreSQL HBA Rules
-
-# Grant local access
-local all all peer map=local
-
-# Require client certificate authentication for the streaming_replica user
-hostssl postgres streaming_replica all cert
-hostssl replication streaming_replica all cert
-hostssl all cnpg_pooler_pgbouncer all cert
-
-# Otherwise use the default authentication method
-host all all all scram-sha-256
-
+Cluster Summary
+Name: default/sandbox
+System ID: 7423474350493388827
+PostgreSQL Image: ghcr.io/cloudnative-pg/postgresql:16.4
+Primary instance: sandbox-1
+Primary start time: 2024-10-08 18:31:57 +0000 UTC (uptime 2m4s)
+Status: Cluster in healthy state
+Instances: 3
+Ready instances: 3
+Size: 126M
+Current Write LSN: 0/6053720 (Timeline: 1 - WAL File: 000000010000000000000006)
Continuous Backup status
-First Point of Recoverability: Not Available
-Working WAL archiving: OK
-Last Archived WAL: 00000008000003B00000001D @ 2021-12-14T10:20:42.272815Z
-Last Failed WAL: -
+Not configured
+
+Physical backups
+No running physical backups found
Streaming Replication status
-Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority
----- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- -------------
-sandbox-1 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000333 00:00:00.000333 00:00:00.005484 streaming quorum 1
-sandbox-3 3B1/61E26448 3B1/61E26448 3B1/61DF82F0 3B1/61DF82F0 00:00:00.000756 00:00:00.000756 00:00:00.000756 streaming quorum 1
+Replication Slots Enabled
+Name Sent LSN Write LSN Flush LSN Replay LSN Write Lag Flush Lag Replay Lag State Sync State Sync Priority Replication Slot Slot Restart LSN Slot WAL Status Slot Safe WAL Size
+---- -------- --------- --------- ---------- --------- --------- ---------- ----- ---------- ------------- ---------------- ---------------- --------------- ------------------
+sandbox-2 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL
+sandbox-3 0/6053720 0/6053720 0/6053720 0/6053720 00:00:00 00:00:00 00:00:00 streaming async 0 active 0/6053720 reserved NULL
+
+Unmanaged Replication Slot Status
+No unmanaged replication slots found
+
+Managed roles status
+No roles managed
+
+Tablespaces status
+No managed tablespaces
+
+Pod Disruption Budgets status
+Name Role Expected Pods Current Healthy Minimum Desired Healthy Disruptions Allowed
+---- ---- ------------- --------------- ----------------------- -------------------
+sandbox replica 2 2 1 1
+sandbox-primary primary 1 1 1 0
Instances status
-Name Database Size Current LSN Replication role Status QoS Manager Version
----- ------------- ----------- ---------------- ------ --- ---------------
-sandbox-1 3B1/610204B8 Standby (sync) OK Guaranteed 1.11.0
-sandbox-2 3B1/61DE3158 Primary OK Guaranteed 1.11.0
-sandbox-3 3B1/62618470 Standby (sync) OK Guaranteed 1.11.0
+Name Current LSN Replication role Status QoS Manager Version Node
+---- ----------- ---------------- ------ --- --------------- ----
+sandbox-1 0/6053720 Primary OK BestEffort 1.24.0 k8s-eu-worker
+sandbox-2 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker2
+sandbox-3 0/6053720 Standby (async) OK BestEffort 1.24.0 k8s-eu-worker
```
+With an additional `-v` (e.g. `kubectl cnpg status sandbox -v -v`), you can
+also view PostgreSQL configuration, HBA settings, and certificates.
+
The command also supports output in `yaml` and `json` format.
### Promote
diff --git a/internal/cmd/plugin/status/cmd.go b/internal/cmd/plugin/status/cmd.go
index 4ca0f70a63..a22594523d 100644
--- a/internal/cmd/plugin/status/cmd.go
+++ b/internal/cmd/plugin/status/cmd.go
@@ -41,15 +41,15 @@ func NewCmd() *cobra.Command {
ctx := cmd.Context()
clusterName := args[0]
- verbose, _ := cmd.Flags().GetBool("verbose")
+ verbose, _ := cmd.Flags().GetCount("verbose")
output, _ := cmd.Flags().GetString("output")
return Status(ctx, clusterName, verbose, plugin.OutputFormat(output))
},
}
- statusCmd.Flags().BoolP(
- "verbose", "v", false, "Include PostgreSQL configuration, HBA rules, and full replication slots info")
+ statusCmd.Flags().CountP(
+ "verbose", "v", "Increase verbosity to display more information")
statusCmd.Flags().StringP(
"output", "o", "text", "Output format. One of text|json")
diff --git a/internal/cmd/plugin/status/status.go b/internal/cmd/plugin/status/status.go
index 1889975c55..3ae465b96a 100644
--- a/internal/cmd/plugin/status/status.go
+++ b/internal/cmd/plugin/status/status.go
@@ -99,7 +99,12 @@ func getPrintableIntegerPointer(i *int) string {
}
// Status implements the "status" subcommand
-func Status(ctx context.Context, clusterName string, verbose bool, format plugin.OutputFormat) error {
+func Status(
+ ctx context.Context,
+ clusterName string,
+ verbosity int,
+ format plugin.OutputFormat,
+) error {
var cluster apiv1.Cluster
var errs []error
@@ -123,17 +128,19 @@ func Status(ctx context.Context, clusterName string, verbose bool, format plugin
status.printHibernationInfo()
status.printDemotionTokenInfo()
status.printPromotionTokenInfo()
- if verbose {
+ if verbosity > 1 {
errs = append(errs, status.printPostgresConfiguration(ctx, clientInterface)...)
+ status.printCertificatesStatus()
}
- status.printCertificatesStatus()
status.printBackupStatus()
- status.printBasebackupStatus()
- status.printReplicaStatus(verbose)
- status.printUnmanagedReplicationSlotStatus()
- status.printRoleManagerStatus()
- status.printTablespacesStatus()
- status.printPodDisruptionBudgetStatus()
+ status.printBasebackupStatus(verbosity)
+ status.printReplicaStatus(verbosity)
+ if verbosity > 0 {
+ status.printUnmanagedReplicationSlotStatus()
+ status.printRoleManagerStatus()
+ status.printTablespacesStatus()
+ status.printPodDisruptionBudgetStatus()
+ }
status.printInstancesStatus()
if len(errs) > 0 {
@@ -217,10 +224,10 @@ func (fullStatus *PostgresqlStatus) getClusterSize(ctx context.Context, client k
return size, nil
}
-func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client kubernetes.Interface) {
+func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, k8sClient kubernetes.Interface) {
summary := tabby.New()
- clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, client)
+ clusterSize, clusterSizeErr := fullStatus.getClusterSize(ctx, k8sClient)
cluster := fullStatus.Cluster
@@ -243,8 +250,7 @@ func (fullStatus *PostgresqlStatus) printBasicInfo(ctx context.Context, client k
isPrimaryFenced := cluster.IsInstanceFenced(cluster.Status.CurrentPrimary)
primaryInstanceStatus := fullStatus.tryGetPrimaryInstance()
- summary.AddLine("Name:", cluster.Name)
- summary.AddLine("Namespace:", cluster.Namespace)
+ summary.AddLine("Name", client.ObjectKeyFromObject(cluster).String())
if primaryInstanceStatus != nil {
summary.AddLine("System ID:", primaryInstanceStatus.SystemID)
@@ -529,9 +535,9 @@ func (fullStatus *PostgresqlStatus) areReplicationSlotsEnabled() bool {
fullStatus.Cluster.Spec.ReplicationSlots.HighAvailability.GetEnabled()
}
-func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbose bool) {
+func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.Tabby, verbosity int) {
switch {
- case fullStatus.areReplicationSlotsEnabled() && verbose:
+ case fullStatus.areReplicationSlotsEnabled() && verbosity > 0:
table.AddHeader(
"Name",
"Sent LSN",
@@ -549,7 +555,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T
"Slot WAL Status",
"Slot Safe WAL Size",
)
- case fullStatus.areReplicationSlotsEnabled() && !verbose:
+ case fullStatus.areReplicationSlotsEnabled() && verbosity == 0:
table.AddHeader(
"Name",
"Sent LSN",
@@ -585,7 +591,7 @@ func (fullStatus *PostgresqlStatus) printReplicaStatusTableHeader(table *tabby.T
func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns(
applicationName string,
columns *[]interface{},
- verbose bool,
+ verbosity int,
) {
printSlotActivity := func(isActive bool) string {
if isActive {
@@ -595,18 +601,18 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns(
}
slot := fullStatus.getPrintableReplicationSlotInfo(applicationName)
switch {
- case slot != nil && verbose:
+ case slot != nil && verbosity > 0:
*columns = append(*columns,
printSlotActivity(slot.Active),
slot.RestartLsn,
slot.WalStatus,
getPrintableIntegerPointer(slot.SafeWalSize),
)
- case slot != nil && !verbose:
+ case slot != nil && verbosity == 0:
*columns = append(*columns,
printSlotActivity(slot.Active),
)
- case slot == nil && verbose:
+ case slot == nil && verbosity > 0:
*columns = append(*columns,
"-",
"-",
@@ -620,7 +626,7 @@ func (fullStatus *PostgresqlStatus) addReplicationSlotsColumns(
}
}
-func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) {
+func (fullStatus *PostgresqlStatus) printReplicaStatus(verbosity int) {
if fullStatus.Cluster.IsReplica() {
return
}
@@ -650,13 +656,13 @@ func (fullStatus *PostgresqlStatus) printReplicaStatus(verbose bool) {
}
status := tabby.New()
- fullStatus.printReplicaStatusTableHeader(status, verbose)
+ fullStatus.printReplicaStatusTableHeader(status, verbosity)
// print Replication Slots columns only if the cluster has replication slots enabled
addReplicationSlotsColumns := func(_ string, _ *[]interface{}) {}
if fullStatus.areReplicationSlotsEnabled() {
addReplicationSlotsColumns = func(applicationName string, columns *[]interface{}) {
- fullStatus.addReplicationSlotsColumns(applicationName, columns, verbose)
+ fullStatus.addReplicationSlotsColumns(applicationName, columns, verbosity)
}
}
@@ -977,7 +983,7 @@ func (fullStatus *PostgresqlStatus) printPodDisruptionBudgetStatus() {
fmt.Println()
}
-func (fullStatus *PostgresqlStatus) printBasebackupStatus() {
+func (fullStatus *PostgresqlStatus) printBasebackupStatus(verbosity int) {
const header = "Physical backups"
primaryInstanceStatus := fullStatus.tryGetPrimaryInstance()
@@ -988,7 +994,7 @@ func (fullStatus *PostgresqlStatus) printBasebackupStatus() {
return
}
- if len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
+ if verbosity > 0 && len(primaryInstanceStatus.PgStatBasebackupsInfo) == 0 {
fmt.Println(aurora.Green(header))
fmt.Println(aurora.Yellow("No running physical backups found").String())
fmt.Println()
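A minimal sketch of the countable `--verbose` flag introduced by patch 4/9, assuming a stand-alone cobra command rather than the plugin's real command tree; the verbosity thresholds follow the panel levels described in the commit message.

```go
// Sketch: each -v / --verbose occurrence bumps the verbosity level, and each
// level unlocks additional status panels.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	statusCmd := &cobra.Command{
		Use: "status CLUSTER",
		RunE: func(cmd *cobra.Command, args []string) error {
			verbosity, _ := cmd.Flags().GetCount("verbose")
			fmt.Println("always shown: cluster summary, replication, instances")
			if verbosity > 0 {
				fmt.Println("level 1: physical backups, unmanaged slots, roles, tablespaces, PDBs")
			}
			if verbosity > 1 {
				fmt.Println("level 2: PostgreSQL configuration, HBA rules, certificates")
			}
			return nil
		},
	}
	// CountP replaces the old BoolP flag: repeating -v increases the count.
	statusCmd.Flags().CountP("verbose", "v", "Increase verbosity to display more information")

	statusCmd.SetArgs([]string{"sandbox", "-v", "-v"})
	_ = statusCmd.Execute()
}
```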
From c188f4b09a21d8075dd37dc2ca68b7218ca2efda Mon Sep 17 00:00:00 2001
From: Leonardo Cecchi
Date: Sat, 12 Oct 2024 10:05:51 +0200
Subject: [PATCH 5/9] fix(plugin): ensure pgadmin4 has a writable home
directory (#5800)
Closes: #5799
Signed-off-by: Leonardo Cecchi
---
internal/cmd/plugin/pgadmin/pgadmin.go | 24 ++++++++++++++++++++++++
1 file changed, 24 insertions(+)
diff --git a/internal/cmd/plugin/pgadmin/pgadmin.go b/internal/cmd/plugin/pgadmin/pgadmin.go
index 44a23f89c4..078164a29e 100644
--- a/internal/cmd/plugin/pgadmin/pgadmin.go
+++ b/internal/cmd/plugin/pgadmin/pgadmin.go
@@ -26,6 +26,7 @@ import (
"github.com/sethvargo/go-password/password"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
@@ -273,6 +274,14 @@ func (cmd *command) generateDeployment() *appsv1.Deployment {
Name: pgAdminPassFileVolumeName,
MountPath: pgAdminPassFileVolumePath,
},
+ {
+ Name: "tmp",
+ MountPath: "/tmp",
+ },
+ {
+ Name: "home",
+ MountPath: "/home/pgadmin",
+ },
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
@@ -303,6 +312,21 @@ func (cmd *command) generateDeployment() *appsv1.Deployment {
},
},
},
+ {
+ Name: "home",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{},
+ },
+ },
+ {
+ Name: "tmp",
+ VolumeSource: corev1.VolumeSource{
+ EmptyDir: &corev1.EmptyDirVolumeSource{
+ Medium: corev1.StorageMediumMemory,
+ SizeLimit: ptr.To(resource.MustParse("100Mi")),
+ },
+ },
+ },
},
},
},
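To summarize patch 5/9, the sketch below pairs the two new volume mounts with their `emptyDir` volumes. The `writableDirs` helper is illustrative; the field values match the deployment hunks above.

```go
// Sketch: pgAdmin 4 needs writable /tmp and /home/pgadmin, provided here by
// two emptyDir volumes plus the matching volume mounts.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

func writableDirs() ([]corev1.VolumeMount, []corev1.Volume) {
	mounts := []corev1.VolumeMount{
		{Name: "tmp", MountPath: "/tmp"},
		{Name: "home", MountPath: "/home/pgadmin"},
	}
	volumes := []corev1.Volume{
		{
			// The home directory is backed by a plain emptyDir.
			Name:         "home",
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
		},
		{
			// /tmp is kept in memory and capped at 100Mi, as in the patch.
			Name: "tmp",
			VolumeSource: corev1.VolumeSource{
				EmptyDir: &corev1.EmptyDirVolumeSource{
					Medium:    corev1.StorageMediumMemory,
					SizeLimit: ptr.To(resource.MustParse("100Mi")),
				},
			},
		},
	}
	return mounts, volumes
}

func main() {
	mounts, volumes := writableDirs()
	fmt.Println(len(mounts), len(volumes))
}
```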
From d20fb269191df2484a562f5ee5cf96f76345c4fe Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sun, 13 Oct 2024 16:56:03 +0200
Subject: [PATCH 6/9] chore(deps): update spellcheck to v0.43.0 (main) (#5807)
---
.github/workflows/spellcheck.yml | 2 +-
Makefile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/spellcheck.yml b/.github/workflows/spellcheck.yml
index a8bbd7866c..847ca8891b 100644
--- a/.github/workflows/spellcheck.yml
+++ b/.github/workflows/spellcheck.yml
@@ -28,4 +28,4 @@ jobs:
uses: actions/checkout@v4
- name: Spellcheck
- uses: rojopolis/spellcheck-github-actions@0.42.0
+ uses: rojopolis/spellcheck-github-actions@0.43.0
diff --git a/Makefile b/Makefile
index d9577c7cd0..61dbcda441 100644
--- a/Makefile
+++ b/Makefile
@@ -44,7 +44,7 @@ POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions
KUSTOMIZE_VERSION ?= v5.4.3
CONTROLLER_TOOLS_VERSION ?= v0.16.3
GORELEASER_VERSION ?= v2.3.2
-SPELLCHECK_VERSION ?= 0.42.0
+SPELLCHECK_VERSION ?= 0.43.0
WOKE_VERSION ?= 0.19.0
OPERATOR_SDK_VERSION ?= v1.37.0
OPM_VERSION ?= v1.47.0
From 47d82aba63108510934cbd7ddeeb117e6be22df6 Mon Sep 17 00:00:00 2001
From: "Jonathan Gonzalez V."
Date: Sun, 13 Oct 2024 20:05:28 +0200
Subject: [PATCH 7/9] chore: switch to dl.k8s.io on the E2E tests (#5814)
Following https://github.com/kubernetes/k8s.io/issues/2396, we should have
moved away from the old download URL a long time ago. Now that the change has
happened, the E2E tests are failing due to a wrong link being used to download
the kubectl client.
Signed-off-by: Jonathan Gonzalez V.
---
hack/setup-cluster.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 00ad1bfa30..636046527e 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -299,7 +299,7 @@ install_kubectl() {
local binary="${bindir}/kubectl"
- curl -sL "https://storage.googleapis.com/kubernetes-release/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}"
+ curl -sL "https://dl.k8s.io/release/v${KUBECTL_VERSION#v}/bin/${OS}/${ARCH}/kubectl" -o "${binary}"
chmod +x "${binary}"
}
From 37d29b4867f2719dac485c7a0c8660c7eb3573a3 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Sun, 13 Oct 2024 23:00:34 +0200
Subject: [PATCH 8/9] chore(deps): update kindest/node docker tag to v1.31.1
(main) (#5756)
---
hack/e2e/run-e2e-kind.sh | 2 +-
hack/setup-cluster.sh | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/hack/e2e/run-e2e-kind.sh b/hack/e2e/run-e2e-kind.sh
index 3f77232917..f37c274a8d 100755
--- a/hack/e2e/run-e2e-kind.sh
+++ b/hack/e2e/run-e2e-kind.sh
@@ -29,7 +29,7 @@ E2E_DIR="${HACK_DIR}/e2e"
export PRESERVE_CLUSTER=${PRESERVE_CLUSTER:-false}
export BUILD_IMAGE=${BUILD_IMAGE:-false}
-KIND_NODE_DEFAULT_VERSION=v1.31.0
+KIND_NODE_DEFAULT_VERSION=v1.31.1
export K8S_VERSION=${K8S_VERSION:-$KIND_NODE_DEFAULT_VERSION}
export CLUSTER_ENGINE=kind
export CLUSTER_NAME=pg-operator-e2e-${K8S_VERSION//./-}
diff --git a/hack/setup-cluster.sh b/hack/setup-cluster.sh
index 636046527e..d8ba973fe7 100755
--- a/hack/setup-cluster.sh
+++ b/hack/setup-cluster.sh
@@ -24,7 +24,7 @@ if [ "${DEBUG-}" = true ]; then
fi
# Defaults
-KIND_NODE_DEFAULT_VERSION=v1.31.0
+KIND_NODE_DEFAULT_VERSION=v1.31.1
K3D_NODE_DEFAULT_VERSION=v1.30.3
CSI_DRIVER_HOST_PATH_DEFAULT_VERSION=v1.15.0
EXTERNAL_SNAPSHOTTER_VERSION=v8.1.0
From bda9e45531e87de84c828fe71053752f1d5094eb Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 11:26:28 +0200
Subject: [PATCH 9/9] chore(deps): update module sigs.k8s.io/controller-tools
to v0.16.4 (main) (#5815)
---
Makefile | 2 +-
config/crd/bases/postgresql.cnpg.io_backups.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_clusters.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_databases.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_poolers.yaml | 2 +-
config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml | 2 +-
8 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/Makefile b/Makefile
index 61dbcda441..3e363b0fd7 100644
--- a/Makefile
+++ b/Makefile
@@ -42,7 +42,7 @@ LOCALBIN ?= $(shell pwd)/bin
BUILD_IMAGE ?= true
POSTGRES_IMAGE_NAME ?= $(shell grep 'DefaultImageName.*=' "pkg/versions/versions.go" | cut -f 2 -d \")
KUSTOMIZE_VERSION ?= v5.4.3
-CONTROLLER_TOOLS_VERSION ?= v0.16.3
+CONTROLLER_TOOLS_VERSION ?= v0.16.4
GORELEASER_VERSION ?= v2.3.2
SPELLCHECK_VERSION ?= 0.43.0
WOKE_VERSION ?= 0.19.0
diff --git a/config/crd/bases/postgresql.cnpg.io_backups.yaml b/config/crd/bases/postgresql.cnpg.io_backups.yaml
index 96be8399da..9e1b5295a4 100644
--- a/config/crd/bases/postgresql.cnpg.io_backups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_backups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: backups.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
index 4581679377..0bbb4455be 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusterimagecatalogs.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: clusterimagecatalogs.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_clusters.yaml b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
index 128d44e47d..d2f810b24e 100644
--- a/config/crd/bases/postgresql.cnpg.io_clusters.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_clusters.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: clusters.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_databases.yaml b/config/crd/bases/postgresql.cnpg.io_databases.yaml
index 7c29850d0d..ea1fbfdba5 100644
--- a/config/crd/bases/postgresql.cnpg.io_databases.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_databases.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: databases.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
index c961bf2eda..1205cd2261 100644
--- a/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_imagecatalogs.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: imagecatalogs.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_poolers.yaml b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
index e09e39b615..59d32f7571 100644
--- a/config/crd/bases/postgresql.cnpg.io_poolers.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_poolers.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: poolers.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io
diff --git a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
index 15fb35c0ba..6c43327c8e 100644
--- a/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
+++ b/config/crd/bases/postgresql.cnpg.io_scheduledbackups.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
- controller-gen.kubebuilder.io/version: v0.16.3
+ controller-gen.kubebuilder.io/version: v0.16.4
name: scheduledbackups.postgresql.cnpg.io
spec:
group: postgresql.cnpg.io