Use proper timezone in the timestamp received from barman-cloud-backup-list #97

Open
wants to merge 10 commits into base: main
2 changes: 1 addition & 1 deletion .github/renovate.json5
@@ -8,7 +8,7 @@
"prConcurrentLimit": 5,
// The branches renovate should target
// PLEASE UPDATE THIS WHEN RELEASING.
"baseBranches": ["main","release-1.22", "release-1.23"],
"baseBranches": ["main","release-1.22", "release-1.23", "release-1.24"],
"ignorePaths": ["docs/**", "releases/**", "contribute/**", "licenses/**", "pkg/versions/**"],
"postUpdateOptions": ["gomodTidy"],
"semanticCommits": "enabled",
4 changes: 3 additions & 1 deletion .github/workflows/backport.yml
@@ -33,6 +33,7 @@ jobs:
backport-requested :arrow_backward:
release-1.22
release-1.23
release-1.24
-
name: Create comment
uses: peter-evans/create-or-update-comment@v4
@@ -56,6 +57,7 @@ jobs:
backport-requested :arrow_backward:
release-1.22
release-1.23
release-1.24

## backport the pull request when it carries the 'backport-requested' label and the target branch labels
back-porting-pr:
@@ -71,7 +73,7 @@ jobs:
strategy:
fail-fast: false
matrix:
branch: [release-1.22, release-1.23]
branch: [release-1.22, release-1.23, release-1.24]
env:
PR: ${{ github.event.pull_request.number }}
outputs:
2 changes: 1 addition & 1 deletion .github/workflows/continuous-delivery.yml
@@ -69,7 +69,7 @@ jobs:
strategy:
fail-fast: false
matrix:
branch: [release-1.22, release-1.23]
branch: [release-1.22, release-1.23, release-1.24]
steps:
- name: Invoke workflow with inputs
uses: benc-uk/workflow-dispatch@v1
2 changes: 1 addition & 1 deletion .github/workflows/continuous-integration.yml
@@ -52,7 +52,7 @@ jobs:
strategy:
fail-fast: false
matrix:
branch: [release-1.22, release-1.23]
branch: [release-1.22, release-1.23, release-1.24]

steps:
- name: Invoke workflow with inputs
2 changes: 2 additions & 0 deletions ADOPTERS.md
@@ -44,3 +44,5 @@ This list is sorted in chronological order, based on the submission date.
| [ParadeDB](https://paradedb.com) | @philippemnoel | 2024-07-10 | ParadeDB is an Elasticsearch alternative on Postgres. It leverages CloudNativePG to manage ParadeDB Postgres clusters which connect to a customer's existing Postgres infrastructure via logical (streaming) replication. |
| [REWE International AG](https://rewe-group.at/en) | @rewemkris | 2024-08-21 |Hello! 👋 We are the DBMS Team of RIAG IT, responsible for managing databases worldwide for our stores, warehouses, and online shops. We leverage CloudNativePG to provide PostgreSQL as a Service, creating highly available databases running on Kubernetes in both Google Cloud and on-premises environments.|
| [Microsoft Azure](https://azure.microsoft.com/en-us/) | @KenKilty | 2024-08-22 | Learn how to [deploy](https://learn.microsoft.com/azure/aks/postgresql-ha-overview) PostgreSQL on [Azure Kubernetes Services (AKS)](https://learn.microsoft.com/azure/aks/what-is-aks) with [EDB commercial support](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.edb-enterprise) and [EDB Postgres-as-a-Service](https://azuremarketplace.microsoft.com/en-us/marketplace/apps/enterprisedb-corp.biganimal-prod-v1) offerings available in the [Azure Marketplace](https://azuremarketplace.microsoft.com/).|
| [PZU Group](https://www.pzu.pl) | @MichaluxPL | 2024-08-26 | PZU is one of the largest financial institutions in Poland and also the largest insurance company in Central and Eastern Europe. CloudNativePG is used as on-premise cloud solution/DBaaS to provide highly available PostgreSQL clusters.|

2 changes: 1 addition & 1 deletion api/v1/cluster_types.go
@@ -563,7 +563,7 @@ const (

// PhaseUnknownPlugin is triggered when the required CNPG-i plugin has not been
// loaded yet
PhaseUnknownPlugin = "Unknown plugin"
PhaseUnknownPlugin = "Cluster cannot proceed to reconciliation due to an unknown plugin being required"

// PhaseImageCatalogError is triggered when the cluster cannot select the image to
// apply because of an invalid or incomplete catalog
1 change: 1 addition & 0 deletions internal/cmd/plugin/destroy/destroy.go
@@ -43,6 +43,7 @@ func Destroy(ctx context.Context, clusterName, instanceName string, keepPVC bool
if err := plugin.Client.List(
ctx,
&jobList,
client.InNamespace(plugin.Namespace),
client.MatchingLabels{
utils.InstanceNameLabelName: instanceName,
},
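The added client.InNamespace(plugin.Namespace) option scopes the Job listing to the namespace the plugin command is operating in; with only the label selector, the list could match Jobs carrying the same instance label in other namespaces. Below is a minimal sketch of a namespace-scoped list with controller-runtime; the helper name and the label key are illustrative, not taken from this repository.

package example

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// listInstanceJobs is an illustrative helper (not part of this PR): it lists
// the Jobs labelled with a given instance name, restricted to one namespace.
func listInstanceJobs(ctx context.Context, c client.Client, namespace, instance string) (batchv1.JobList, error) {
	var jobs batchv1.JobList
	err := c.List(
		ctx,
		&jobs,
		// Without this option the query would span every namespace the
		// client is allowed to read.
		client.InNamespace(namespace),
		// The label key is assumed here for illustration only.
		client.MatchingLabels{"cnpg.io/instanceName": instance},
	)
	return jobs, err
}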
9 changes: 7 additions & 2 deletions internal/cnpi/plugin/repository/setup.go
@@ -84,7 +84,10 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
}
}()

constructorLogger := log.FromContext(ctx).WithValues("pluginName", name)
constructorLogger := log.
FromContext(ctx).
WithName("setPluginProtocol").
WithValues("pluginName", name)
ctx = log.IntoContext(ctx, constructorLogger)

if handler, err = protocol.Dial(ctx); err != nil {
@@ -98,7 +101,9 @@ func (r *data) setPluginProtocol(name string, protocol connection.Protocol) erro
destructor := func(res connection.Interface) {
err := res.Close()
if err != nil {
destructorLogger := log.FromContext(context.Background()).WithValues("pluginName", res.Name())
destructorLogger := log.FromContext(context.Background()).
WithName("setPluginProtocol").
WithValues("pluginName", res.Name())
destructorLogger.Warning("Error while closing plugin connection", "err", err)
}
}
4 changes: 3 additions & 1 deletion internal/controller/cluster_controller.go
@@ -164,7 +164,9 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
ctx,
cluster,
apiv1.PhaseUnknownPlugin,
fmt.Sprintf("Unknown plugin %s", errUnknownPlugin.Name),
fmt.Sprintf("Unknown plugin: '%s'. "+
"This may be caused by the plugin not being loaded correctly by the operator. "+
"Check the operator and plugin logs for errors", errUnknownPlugin.Name),
)
}

4 changes: 3 additions & 1 deletion internal/management/istio/istio.go
@@ -31,14 +31,16 @@ import (
// the service exists
func TryInvokeQuitEndpoint(ctx context.Context) error {
const endpoint = "http://localhost:15000/quitquitquit"
logger := log.FromContext(ctx)
logger := log.FromContext(ctx).WithName("try_invoke_quit_quit_endpoint")

clientHTTP := http.Client{Timeout: 5 * time.Second}
resp, err := clientHTTP.Post(endpoint, "", nil)
if errors.Is(err, syscall.ECONNREFUSED) || os.IsTimeout(err) {
logger.Debug("received ECONNREFUSED, ignoring the error", "endpoint", endpoint)
return nil
}
if err != nil {
logger.Error(err, "while invoking the /quitquitquit endpoint", "endpoint", endpoint)
return err
}
if closeErr := resp.Body.Close(); closeErr != nil {
13 changes: 7 additions & 6 deletions pkg/management/catalog/catalog.go
@@ -219,13 +219,15 @@ type BarmanBackup struct {
// The backup label
Label string `json:"backup_label"`

// The moment where the backup started
// The moment where the backup started; this value is retrieved from the barman command output
// and is in the format "Mon Jan 2 15:04:05 2006", with the timezone being the local one
BeginTimeString string `json:"begin_time"`

// The moment where the backup ended
// The moment where the backup ended; this value is retrieved from the barman command output
// and is in the format "Mon Jan 2 15:04:05 2006", with the timezone being the local one
EndTimeString string `json:"end_time"`

// The moment where the backup ended
// The moment where the backup started
BeginTime time.Time

// The moment where the backup ended
@@ -281,17 +283,16 @@ func (b *BarmanBackup) deserializeBackupTimeStrings() error {
const (
barmanTimeLayout = "Mon Jan 2 15:04:05 2006"
)

var err error
if b.BeginTimeString != "" {
b.BeginTime, err = time.Parse(barmanTimeLayout, b.BeginTimeString)
b.BeginTime, err = time.ParseInLocation(barmanTimeLayout, b.BeginTimeString, time.Local)
if err != nil {
return err
}
}

if b.EndTimeString != "" {
b.EndTime, err = time.Parse(barmanTimeLayout, b.EndTimeString)
b.EndTime, err = time.ParseInLocation(barmanTimeLayout, b.EndTimeString, time.Local)
if err != nil {
return err
}
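This is the core of the fix: barman-cloud-backup-list prints begin_time and end_time in the local timezone of the barman process, with no zone marker in the string. time.Parse treats a zone-less timestamp as UTC, shifting the resulting instant on any system whose local zone is not UTC, while time.ParseInLocation keeps the wall-clock value in the given location. A minimal, self-contained sketch of the difference (the sample value is illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	const barmanTimeLayout = "Mon Jan 2 15:04:05 2006"
	const raw = "Tue Jan 19 03:14:08 2038"

	// time.Parse assumes UTC when the layout carries no zone information,
	// so the resulting instant is wrong on any non-UTC server.
	asUTC, _ := time.Parse(barmanTimeLayout, raw)

	// time.ParseInLocation keeps the wall-clock reading in the local zone,
	// matching what barman actually reported.
	asLocal, _ := time.ParseInLocation(barmanTimeLayout, raw, time.Local)

	fmt.Println(asUTC)   // 2038-01-19 03:14:08 +0000 UTC
	fmt.Println(asLocal) // same wall-clock time, but in the local zone (e.g. +0100 CET)
}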
39 changes: 39 additions & 0 deletions pkg/management/catalog/catalog_test.go
@@ -245,5 +245,44 @@ var _ = Describe("barman-cloud-backup-show parsing", func() {
Expect(result.SystemID).To(Equal("6885668674852188181"))
Expect(result.BeginTimeString).To(Equal("Tue Jan 19 03:14:08 2038"))
Expect(result.EndTimeString).To(Equal("Tue Jan 19 04:14:08 2038"))

// Check that the timezone of the parsed times equals the local one
Expect(result.BeginTime.Location()).To(Equal(time.Now().Location()))
Expect(result.EndTime.Location()).To(Equal(time.Now().Location()))
})

It("parses valid begin and end time strings correctly", func() {
backup := &BarmanBackup{
BeginTimeString: "Mon Jan 2 15:04:05 2006",
EndTimeString: "Tue Jan 3 15:04:05 2006",
}
err := backup.deserializeBackupTimeStrings()
Expect(err).ToNot(HaveOccurred())
Expect(backup.BeginTime).To(Equal(time.Date(2006, time.January, 2, 15, 4, 5, 0, time.Local)))
Expect(backup.EndTime).To(Equal(time.Date(2006, time.January, 3, 15, 4, 5, 0, time.Local)))
})

It("returns an error for invalid begin time string", func() {
backup := &BarmanBackup{
BeginTimeString: "invalid time string",
}
err := backup.deserializeBackupTimeStrings()
Expect(err).To(HaveOccurred())
})

It("returns an error for invalid end time string", func() {
backup := &BarmanBackup{
EndTimeString: "invalid time string",
}
err := backup.deserializeBackupTimeStrings()
Expect(err).To(HaveOccurred())
})

It("handles empty begin and end time strings gracefully", func() {
backup := &BarmanBackup{}
err := backup.deserializeBackupTimeStrings()
Expect(err).ToNot(HaveOccurred())
Expect(backup.BeginTime.IsZero()).To(BeTrue())
Expect(backup.EndTime.IsZero()).To(BeTrue())
})
})
16 changes: 8 additions & 8 deletions pkg/utils/logs/logs.go
@@ -35,9 +35,9 @@ type StreamingRequest struct {
Pod *v1.Pod
Options *v1.PodLogOptions
Previous bool `json:"previous,omitempty"`
// NOTE: the client argument may be omitted, but it is good practice to pass it
// NOTE: the Client argument may be omitted, but it is good practice to pass it
// Importantly, it makes the logging functions testable
client kubernetes.Interface
Client kubernetes.Interface
}

func (spl *StreamingRequest) getPodName() string {
@@ -63,14 +63,14 @@ func (spl *StreamingRequest) getLogOptions() *v1.PodLogOptions {
}

func (spl *StreamingRequest) getKubernetesClient() kubernetes.Interface {
if spl.client != nil {
return spl.client
if spl.Client != nil {
return spl.Client
}
conf := ctrl.GetConfigOrDie()

spl.client = kubernetes.NewForConfigOrDie(conf)
spl.Client = kubernetes.NewForConfigOrDie(conf)

return spl.client
return spl.Client
}

// getStreamToPod opens the REST request to the pod
@@ -125,7 +125,7 @@ func TailPodLogs(
Follow: true,
SinceTime: &now,
},
client: client,
Client: client,
}
return streamPodLog.Stream(ctx, writer)
}
@@ -154,7 +154,7 @@ func GetPodLogs(
Pod: &pod,
Previous: getPrevious,
Options: &v1.PodLogOptions{},
client: client,
Client: client,
}
logsRequest := streamPodLog.getStreamToPod()

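Exporting the field (client becomes Client) lets callers outside the package, and in particular tests, inject their own kubernetes.Interface instead of letting getKubernetesClient fall back to ctrl.GetConfigOrDie. A rough sketch of how a caller might now inject a fake clientset; the import path of the logs package and the pod definition are assumptions made for illustration:

package example

import (
	"bytes"
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"

	// Module path assumed for illustration.
	"github.com/cloudnative-pg/cloudnative-pg/pkg/utils/logs"
)

func streamFakeLogs() (string, error) {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "cluster-example-1"}}

	// The exported Client field makes it possible to bypass the in-cluster
	// configuration and use a fake clientset.
	req := logs.StreamingRequest{
		Pod:     pod,
		Options: &v1.PodLogOptions{},
		Client:  fake.NewSimpleClientset(pod),
	}

	var buf bytes.Buffer
	err := req.Stream(context.TODO(), &buf)
	return buf.String(), err
}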
4 changes: 2 additions & 2 deletions pkg/utils/logs/logs_test.go
@@ -56,7 +56,7 @@ var _ = Describe("Pod logging tests", func() {
streamPodLog := StreamingRequest{
Pod: nil,
Options: podLogOptions,
client: client,
Client: client,
}
var logBuffer bytes.Buffer
err := streamPodLog.Stream(ctx, &logBuffer)
@@ -95,7 +95,7 @@
Pod: pod,
Options: podLogOptions,
Previous: false,
client: client,
Client: client,
}

var logBuffer bytes.Buffer
10 changes: 6 additions & 4 deletions tests/e2e/affinity_test.go
@@ -47,10 +47,12 @@ var _ = Describe("E2E Affinity", Serial, Label(tests.LabelPodScheduling), func()
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
}
return env.DeleteNamespace(namespace)
return env.CleanupNamespace(
namespace,
CurrentSpecReport().LeafNodeText,
CurrentSpecReport().Failed(),
GinkgoWriter,
)
})

AssertCreateCluster(namespace, clusterName, clusterFile, env)
10 changes: 6 additions & 4 deletions tests/e2e/apparmor_test.go
@@ -51,10 +51,12 @@ var _ = Describe("AppArmor support", Serial, Label(tests.LabelNoOpenshift, tests
namespace, err = env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
}
return env.DeleteNamespace(namespace)
return env.CleanupNamespace(
namespace,
CurrentSpecReport().LeafNodeText,
CurrentSpecReport().Failed(),
GinkgoWriter,
)
})

AssertCreateCluster(namespace, clusterName, clusterAppArmorFile, env)
10 changes: 6 additions & 4 deletions tests/e2e/architecture_test.go
@@ -74,10 +74,12 @@ var _ = Describe("Available Architectures", Label(tests.LabelBasic), func() {
namespace, err := env.CreateUniqueNamespace(namespacePrefix)
Expect(err).ToNot(HaveOccurred())
DeferCleanup(func() error {
if CurrentSpecReport().Failed() {
env.DumpNamespaceObjects(namespace, "out/"+CurrentSpecReport().LeafNodeText+".log")
}
return env.DeleteNamespace(namespace)
return env.CleanupNamespace(
namespace,
CurrentSpecReport().LeafNodeText,
CurrentSpecReport().Failed(),
GinkgoWriter,
)
})

clusterName, err := env.GetResourceNameFromYAML(clusterManifest)
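The three e2e suites above replace the inline "dump the namespace objects on failure, then delete the namespace" cleanup with a single env.CleanupNamespace call. The helper itself is not shown in this diff; the sketch below is only a guess at its shape, assuming it wraps the behaviour previously inlined in each suite (receiver type, signature, and logging are hypothetical, and the io/fmt imports plus the TestingEnvironment type are assumed from the test utilities):

// Hypothetical sketch, not taken from this PR: a CleanupNamespace helper that
// wraps the dump-on-failure and delete steps previously repeated in each suite.
func (env *TestingEnvironment) CleanupNamespace(
	namespace string,
	testName string,
	testFailed bool,
	writer io.Writer,
) error {
	if testFailed {
		// Preserve the diagnostic dump the old inline code produced.
		env.DumpNamespaceObjects(namespace, "out/"+testName+".log")
	}
	fmt.Fprintf(writer, "cleaning up namespace %q\n", namespace)
	return env.DeleteNamespace(namespace)
}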