From 51f1ac28b738ec9f6bb0593520beb513118c2ea3 Mon Sep 17 00:00:00 2001
From: nouseforaname <34882943+nouseforaname@users.noreply.github.com>
Date: Tue, 10 Sep 2024 14:34:11 +0200
Subject: [PATCH] refactor test

- reorder expectations for log output; we noticed that they were somewhat
  badly organized (see the gbytes note below the --- separator)
- fix CI flake: CI runs tests in parallel, and they flake when the dir that
  contains the lockfiles is cleaned up prematurely (see the cleanup sketch at
  the end of this patch)

---
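Note for reviewers: the reordering matters because of how gbytes matchers
consume their buffer. A minimal, self-contained sketch of the cursor behavior
(illustrative only, not part of this patch; the package name and sample
strings are made up):

    package gbytescursor_test

    import (
    	"testing"

    	. "github.com/onsi/gomega"
    	"github.com/onsi/gomega/gbytes"
    )

    // A successful Say() fast-forwards the buffer's read cursor to just
    // after the match, so later patterns can only match output that
    // appears after the previous match.
    func TestSayAdvancesCursor(t *testing.T) {
    	g := NewWithT(t)

    	buf := gbytes.NewBuffer()
    	_, _ = buf.Write([]byte("successfully stored state for tf:x\ndraining complete\n"))

    	g.Expect(buf).To(gbytes.Say("successfully stored state")) // cursor moves past this match
    	g.Expect(buf).To(gbytes.Say("draining complete"))         // matches only the remaining output
    	g.Expect(buf).ToNot(gbytes.Say("successfully"))           // text behind the cursor can no longer match
    }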
 cmd/serve.go                                  |  2 +-
 .../fake-uuid-provision.tf                    |  2 +-
 integrationtest/termination_recovery_test.go  | 23 +++++++++++++------
 pkg/providers/tf/deployment_manager.go        |  3 ++-
 4 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/cmd/serve.go b/cmd/serve.go
index 22deeeb1e..4828a4ecc 100644
--- a/cmd/serve.go
+++ b/cmd/serve.go
@@ -259,7 +259,7 @@ func startServer(registry pakBroker.BrokerRegistry, db *sql.DB, brokerapi http.H
 			logger.Info("received SIGTERM, server is shutting down gracefully allowing for in flight work to finish")
 			defer shutdownRelease()
 			for store.LockFilesExist() {
-				logger.Info("draining csb instance")
+				logger.Info("draining csb in progress")
 				time.Sleep(time.Second * 1)
 			}
 			logger.Info("draining complete")
diff --git a/integrationtest/fixtures/termination-recovery/fake-uuid-provision.tf b/integrationtest/fixtures/termination-recovery/fake-uuid-provision.tf
index 1538fb5f9..ba683a089 100644
--- a/integrationtest/fixtures/termination-recovery/fake-uuid-provision.tf
+++ b/integrationtest/fixtures/termination-recovery/fake-uuid-provision.tf
@@ -10,7 +10,7 @@
 }
 resource "null_resource" "sleeper" {
   provisioner "local-exec" {
-    command = "sleep 5"
+    command = "sleep 10"
   }
 }
 resource "random_uuid" "random" {}
diff --git a/integrationtest/termination_recovery_test.go b/integrationtest/termination_recovery_test.go
index 33b5b4df3..f28f6044d 100644
--- a/integrationtest/termination_recovery_test.go
+++ b/integrationtest/termination_recovery_test.go
@@ -15,7 +15,7 @@ import (
 	"github.com/pivotal-cf/brokerapi/v11/domain"
 )
 
-var _ = Describe("Recovery From Broker Termination", func() {
+var _ = Describe("Recovery From Broker Termination", Ordered, func() {
 	const (
 		serviceOfferingGUID = "083f2884-eb7b-11ee-96c7-174e35671015"
 		servicePlanGUID     = "0d953850-eb7b-11ee-bb2c-8ba95d780d82"
@@ -27,13 +27,15 @@
 		stdout    *Buffer
 		stderr    *Buffer
 	)
+	AfterAll(func() {
+		os.RemoveAll("/tmp/csb/")
+	})
 
 	Describe("running csb on a VM", func() {
 		BeforeEach(func() {
 			brokerpak = must(packer.BuildBrokerpak(csb, fixtures("termination-recovery")))
 			stdout = NewBuffer()
 			stderr = NewBuffer()
-			os.RemoveAll("/tmp/csb/")
 			broker = must(testdrive.StartBroker(csb, brokerpak, database, testdrive.WithOutputs(stdout, stderr)))
 
 			DeferCleanup(func() {
@@ -57,14 +59,22 @@
 
 			By("logging a message")
 			Eventually(stdout).Should(Say("received SIGTERM"))
+			Eventually(stdout).Should(Say("draining csb in progress"))
+			lockFile, err := os.Stat(fmt.Sprintf("/tmp/csb/tf_%s_", instanceGUID))
+			Expect(err).ToNot(HaveOccurred())
+			Expect(lockFile).ToNot(BeNil())
 
 			By("ensuring that the broker rejects requests")
 			Expect(broker.Client.LastOperation(instanceGUID, uuid.NewString()).Error).To(HaveOccurred())
 
-			Eventually(stdout, time.Minute).Should(Say("draining csb"))
-			Eventually(stdout, time.Minute).Should(Say(fmt.Sprintf("successfully applied tf:%s", instanceGUID)))
-			Eventually(stdout, time.Minute).Should(Say("draining complete"))
-			Eventually(stdout, time.Minute).ShouldNot(Say("shutdown error"))
+			// Fun stuff: do not optimize this with a SatisfyAll(). The relevant part of the docs is:
+			// "When Say succeeds, it fast forwards the gbytes.Buffer's read cursor to just after the successful match."
+			// Meaning: if the lines below were partially matched on a first attempt, no further attempt could succeed,
+			// because we have already forwarded past the location of that initial match.
+
+			Eventually(stdout, time.Second*20).Should(Say(fmt.Sprintf("successfully stored state for tf:%s:", instanceGUID)))
+			Eventually(stdout, time.Second*20).Should(Say("draining complete"))
+			Consistently(stderr, time.Second*20).ShouldNot(Say("shutdown error"))
 
 			broker = must(testdrive.StartBroker(csb, brokerpak, database, testdrive.WithOutputs(stdout, stderr)))
 
@@ -85,7 +95,6 @@
 
 			stdout = NewBuffer()
 			stderr = NewBuffer()
-			os.RemoveAll("/tmp/csb/")
 			broker = must(testdrive.StartBroker(csb, brokerpak, database, testdrive.WithOutputs(stdout, stderr), testdrive.WithEnv("CF_INSTANCE_GUID=dcfa061e-c0e3-4237-a805-734578347393")))
 
 			DeferCleanup(func() {
diff --git a/pkg/providers/tf/deployment_manager.go b/pkg/providers/tf/deployment_manager.go
index f71a1037c..3a3f6c19b 100644
--- a/pkg/providers/tf/deployment_manager.go
+++ b/pkg/providers/tf/deployment_manager.go
@@ -80,8 +80,9 @@ func (d *DeploymentManager) MarkOperationFinished(deployment *storage.TerraformD
 			"deploymentID": deployment.ID,
 			"message":      deployment.LastOperationMessage,
 		})
+	} else {
+		d.logger.Info(fmt.Sprintf("successfully stored state for %s", deployment.ID))
 	}
-	d.logger.Info(fmt.Sprintf("successfully applied %s", deployment.ID))
 
 	return d.store.RemoveLockFile(deployment.ID)
 }
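Note for reviewers: the cleanup move leans on Ginkgo v2 container semantics:
AfterAll is only valid inside an Ordered container (hence the added Ordered
decorator) and runs exactly once, after all specs in the container have
finished, so /tmp/csb/ is no longer wiped before every spec while lockfiles
may still be in use. A stripped-down sketch of that shape (illustrative, not
the actual test):

    package terminationrecovery_test

    import (
    	"os"

    	. "github.com/onsi/ginkgo/v2"
    )

    var _ = Describe("Recovery From Broker Termination", Ordered, func() {
    	// Runs exactly once, after every spec in this container has finished,
    	// so no in-flight spec can lose the lockfile directory from under it.
    	AfterAll(func() {
    		os.RemoveAll("/tmp/csb/")
    	})

    	BeforeEach(func() {
    		// Per-spec setup. The old code called os.RemoveAll("/tmp/csb/")
    		// here, which raced with tests running in parallel on CI that
    		// were still using the directory.
    	})

    	It("recovers from a terminated broker", func() { /* ... */ })
    })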