From 4c38556e7525d0100049a6516276970b65fc2384 Mon Sep 17 00:00:00 2001 From: Dimitar Panayotov Date: Fri, 10 May 2019 15:57:18 +0300 Subject: [PATCH] Notifications (#64) --- Gopkg.lock | 104 +-- Gopkg.toml | 20 +- pkg/platform/broker_client.go | 43 +- .../platformfakes/fake_broker_client.go | 86 ++- .../platformfakes/fake_catalog_fetcher.go | 6 +- pkg/platform/platformfakes/fake_client.go | 4 +- .../platformfakes/fake_visibility_client.go | 75 +-- pkg/platform/types.go | 64 -- pkg/platform/visibility_client.go | 30 +- pkg/sbproxy/notifications/consumer.go | 55 ++ .../notifications/handlers/broker_handler.go | 245 +++++++ .../handlers/broker_handler_test.go | 615 ++++++++++++++++++ .../handlers/handlers_suite_test.go | 24 + pkg/sbproxy/notifications/handlers/helpers.go | 25 + .../notifications/handlers/helpers_test.go | 81 +++ .../handlers/visibilities_handler.go | 227 +++++++ .../handlers/visibility_handler_test.go | 610 +++++++++++++++++ pkg/sbproxy/notifications/producer.go | 355 ++++++++++ pkg/sbproxy/notifications/producer_test.go | 567 ++++++++++++++++ pkg/sbproxy/options.go | 13 +- pkg/sbproxy/options_test.go | 5 +- pkg/sbproxy/reconcile/reconcile_brokers.go | 97 ++- .../reconcile/reconcile_brokers_test.go | 31 +- pkg/sbproxy/reconcile/reconcile_settings.go | 5 +- pkg/sbproxy/reconcile/reconcile_task.go | 131 ---- pkg/sbproxy/reconcile/reconcile_task_test.go | 100 --- .../reconcile/reconcile_visibilities.go | 205 +++--- .../reconcile/reconcile_visibilities_test.go | 220 ++++--- pkg/sbproxy/reconcile/reconciler.go | 91 +++ pkg/sbproxy/reconcile/reconciler_test.go | 174 +++++ pkg/sbproxy/reconcile/resyncer.go | 60 ++ pkg/sbproxy/sbproxy.go | 108 ++- pkg/sm/client_test.go | 32 +- pkg/sm/options.go | 39 +- pkg/sm/options_test.go | 10 +- pkg/sm/types.go | 7 +- 36 files changed, 3807 insertions(+), 757 deletions(-) delete mode 100644 pkg/platform/types.go create mode 100644 pkg/sbproxy/notifications/consumer.go create mode 100644 pkg/sbproxy/notifications/handlers/broker_handler.go create mode 100644 pkg/sbproxy/notifications/handlers/broker_handler_test.go create mode 100644 pkg/sbproxy/notifications/handlers/handlers_suite_test.go create mode 100644 pkg/sbproxy/notifications/handlers/helpers.go create mode 100644 pkg/sbproxy/notifications/handlers/helpers_test.go create mode 100644 pkg/sbproxy/notifications/handlers/visibilities_handler.go create mode 100644 pkg/sbproxy/notifications/handlers/visibility_handler_test.go create mode 100644 pkg/sbproxy/notifications/producer.go create mode 100644 pkg/sbproxy/notifications/producer_test.go delete mode 100644 pkg/sbproxy/reconcile/reconcile_task.go delete mode 100644 pkg/sbproxy/reconcile/reconcile_task_test.go create mode 100644 pkg/sbproxy/reconcile/reconciler.go create mode 100644 pkg/sbproxy/reconcile/reconciler_test.go create mode 100644 pkg/sbproxy/reconcile/resyncer.go diff --git a/Gopkg.lock b/Gopkg.lock index ab2df68a..7610121d 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,7 +2,7 @@ [[projects]] - digest = "1:6eec080e3b8f4e2a2d88973bfeced0aea02d4c84befd1211f9591a0c97b56fe1" + digest = "1:064f5156d2cd3edd55c3b2710ae9c117a93a454b8a991ced7794a7f434d0a47f" name = "github.com/Peripli/service-manager" packages = [ "api", @@ -11,6 +11,7 @@ "api/filters/authn/oauth", "api/healthcheck", "api/info", + "api/notifications", "api/osb", "cf", "config", @@ -30,15 +31,18 @@ "pkg/util", "pkg/util/slice", "pkg/web", + "pkg/ws", "storage", "storage/catalog", "storage/interceptors", "storage/postgres", + "storage/postgres/notification_connection", 
"test/common", + "test/testutil", ] pruneopts = "NT" - revision = "b00c515277cde8158eb7fe0b3ff3b706a1ac0940" - version = "v0.2.3" + revision = "0cfafbf48bd477e7ef9626b20b938d7fc5a3a837" + version = "v0.3.0" [[projects]] digest = "1:f780d408067189c4c42b53f7bb24ebf8fd2a1e4510b813ed6e79dd6563e38cc5" @@ -95,11 +99,11 @@ [[projects]] branch = "master" - digest = "1:fbc02c2b3b78e6bc30228f3bae582f1fcea74a050cbdbf42f24eff12bef8eab4" + digest = "1:b48ef4624ce31088e81b106c90fa290193d728fd2b4afbdc361c6c3e26ffcfaf" name = "github.com/gavv/monotime" packages = ["."] pruneopts = "UT" - revision = "6f8212e8d10df7383609d3c377ca08884d8f3ec0" + revision = "30dba43534243e3484a34676a0f068d12b989f84" [[projects]] digest = "1:d57cb1af013b165b6907abb4f844f9e751e37aa55f3c0156c60dd5028cf0a7c1" @@ -188,6 +192,14 @@ revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" version = "v1.6.1" +[[projects]] + digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + [[projects]] digest = "1:c0d19ab64b32ce9fe5cf4ddceba78d5bc9807f0016db6b1183599da3dcc24d10" name = "github.com/hashicorp/hcl" @@ -231,7 +243,7 @@ [[projects]] branch = "master" - digest = "1:5fe871eb8905c1f38d41a136a678125e5cdf9238c6c9a1711180b9b84b5c33a2" + digest = "1:2c7f9e2e9c7729a1785f57f95e0bbfddc384f11073e8213994d11113210644a0" name = "github.com/jmoiron/sqlx" packages = [ ".", @@ -239,7 +251,7 @@ "types", ] pruneopts = "UT" - revision = "1d3423c595d749e4613fce663591b44ae539d377" + revision = "38398a30ed8516ffda617a04c822de09df8a3ec5" [[projects]] digest = "1:aaa8e0e7e35d92e21daed3f241832cee73d15ca1cd3302ba3843159a959a7eac" @@ -250,20 +262,28 @@ "zlib", ] pruneopts = "UT" - revision = "30be6041bed523c18e269a700ebd9c2ea9328574" - version = "v1.4.1" + revision = "b4b6d9e7f099f14b3a4a08f0664986cd4c85ebd6" + version = "v1.5.0" [[projects]] - digest = "1:2d643962fac133904694fffa959bc3c5dcfdcee38c6f5ffdd99a3c93eb9c835c" + digest = "1:923c4d7194b42e054b2eb8a6c62824ac55e23ececc1c7e48d4da69c971c55954" name = "github.com/klauspost/cpuid" packages = ["."] pruneopts = "UT" - revision = "e7e905edc00ea8827e58662220139109efea09db" - version = "v1.2.0" + revision = "05a8198c0f5a27739aec358908d7e12c64ce6eb7" + version = "v1.2.1" + +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" [[projects]] branch = "master" - digest = "1:bc1c0be40c67b6b4aee09d7508d5a2a52c1c116b1fa43806dad2b0d6b4d4003b" + digest = "1:2abaafc9cb59897a71c84f1daf4131d59c0dd349c671206274ace759730fc1a0" name = "github.com/lib/pq" packages = [ ".", @@ -271,7 +291,7 @@ "scram", ] pruneopts = "UT" - revision = "51e2106eed1cea199c802d2a49e91e2491b02056" + revision = "2ff3cb3adc01768e0a552b3a02575a6df38a9bea" [[projects]] digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7" @@ -303,7 +323,7 @@ name = "github.com/onrik/logrus" packages = ["filename"] pruneopts = "UT" - revision = "05335c5abaa99d4c7a3f36df162b483106ef38a8" + revision = "0331655e8b9ec13423e2b31914d0a2154b13474e" [[projects]] digest = "1:b013cb97ec8147b033199dfc5360d410130e1fc5d74dd6f08c4d777fe49b5c37" @@ -364,12 +384,12 @@ version = "v2.1.0" [[projects]] - digest = "1:e0f50a07c0def90588d69f77178712c6fdc67eb6576365f551cce98b44b501bf" 
+ digest = "1:93131d8002d7025da13582877c32d1fc302486775a1b06f62241741006428c5e" name = "github.com/pelletier/go-toml" packages = ["."] pruneopts = "UT" - revision = "63909f0a90ab0f36909e8e044e46ace10cf13ba2" - version = "v1.3.0" + revision = "728039f679cbcd4f6a54e080d2219a4c4928c546" + version = "v1.4.0" [[projects]] digest = "1:cf31692c14422fa27c83a05292eb5cbe0fb2775972e8f1f8446a71549bd8980b" @@ -406,14 +426,6 @@ pruneopts = "UT" revision = "1555304b9b35fdd2b425bccf1a5613677705e7d0" -[[projects]] - branch = "master" - digest = "1:ed615c5430ecabbb0fb7629a182da65ecee6523900ac1ac932520860878ffcad" - name = "github.com/robfig/cron" - packages = ["."] - pruneopts = "UT" - revision = "b41be1df696709bb6395fe435af20370037c0b4c" - [[projects]] digest = "1:d917313f309bda80d27274d53985bc65651f81a5b66b820749ac7f8ef061fd04" name = "github.com/sergi/go-diff" @@ -423,12 +435,12 @@ version = "v1.0.0" [[projects]] - digest = "1:5622116f2c79239f2d25d47b881e14f96a8b8c17b63b8a8326a38ee1a332b007" + digest = "1:fd61cf4ae1953d55df708acb6b91492d538f49c305b364a014049914495db426" name = "github.com/sirupsen/logrus" packages = ["."] pruneopts = "UT" - revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba" - version = "v1.0.4" + revision = "8bdbc7bcc01dcbb8ec23dc8a28e332258d25251f" + version = "v1.4.1" [[projects]] digest = "1:bb495ec276ab82d3dd08504bbc0594a65de8c3b22c6f2aaa92d05b73fbf3a82e" @@ -589,20 +601,19 @@ [[projects]] branch = "master" - digest = "1:bac77e18329fa88ea880b9a7cfcb8fd25d8b835397dd9143f4b162a43e35bb60" + digest = "1:b8fa1ff0fc20983395978b3f771bb10438accbfe19326b02e236c1d4bf1c91b2" name = "golang.org/x/crypto" packages = [ "ed25519", "ed25519/internal/edwards25519", "pbkdf2", - "ssh/terminal", ] pruneopts = "UT" - revision = "f416ebab96af27ca70b6e5c23d6a0747530da626" + revision = "cbcb750295291b33242907a04be40e80801d0cfc" [[projects]] branch = "master" - digest = "1:e6b963cfd255c232a9613b0d12438d2972b055915ede2f6d905f8da113a86dc7" + digest = "1:a3618b1fc378635b02a2e0eef5694311e1e29137c562210ad46999d8f3e0cc2c" name = "golang.org/x/net" packages = [ "context", @@ -614,7 +625,7 @@ "publicsuffix", ] pruneopts = "UT" - revision = "1da14a5a36f220ea3f03470682b737b1dfd5de22" + revision = "a4d6f7feada510cc50e69a37b484cb0fdc6b7876" [[projects]] branch = "master" @@ -629,17 +640,14 @@ [[projects]] branch = "master" - digest = "1:32a91fdcb8f4ae0b2376129fe0091963fa1bbf42d4c1826ad41f7b6410f385c0" + digest = "1:4fc43e824f7e546353de724c00507b1d9c0bab56344b10c2330a97f166a0fba7" name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - ] + packages = ["unix"] pruneopts = "UT" - revision = "12500544f89f9420afe9529ba8940bf72d294972" + revision = "a5b02f93d862f065920dd6a40dddc66b60d0dec4" [[projects]] - digest = "1:436b24586f8fee329e0dd65fd67c817681420cda1d7f934345c13fe78c212a73" + digest = "1:28deae5fe892797ff37a317b5bcda96d11d1c90dadd89f1337651df3bc4c586e" name = "golang.org/x/text" packages = [ "collate", @@ -656,6 +664,8 @@ "encoding/unicode", "internal/colltab", "internal/gen", + "internal/language", + "internal/language/compact", "internal/tag", "internal/triegen", "internal/ucd", @@ -670,8 +680,8 @@ "unicode/rangetable", ] pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" + revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" + version = "v0.3.2" [[projects]] digest = "1:6eb6e3b6d9fffb62958cf7f7d88dbbe1dd6839436b0802e194c590667a40412a" @@ -733,30 +743,36 @@ "github.com/Peripli/service-manager/api/filters", 
"github.com/Peripli/service-manager/api/filters/authn/basic", "github.com/Peripli/service-manager/api/healthcheck", + "github.com/Peripli/service-manager/api/notifications", "github.com/Peripli/service-manager/api/osb", "github.com/Peripli/service-manager/pkg/env", "github.com/Peripli/service-manager/pkg/env/envfakes", "github.com/Peripli/service-manager/pkg/health", "github.com/Peripli/service-manager/pkg/log", + "github.com/Peripli/service-manager/pkg/query", "github.com/Peripli/service-manager/pkg/security/filters", "github.com/Peripli/service-manager/pkg/server", "github.com/Peripli/service-manager/pkg/types", "github.com/Peripli/service-manager/pkg/util", "github.com/Peripli/service-manager/pkg/web", + "github.com/Peripli/service-manager/storage/interceptors", "github.com/Peripli/service-manager/test/common", + "github.com/Peripli/service-manager/test/testutil", "github.com/fatih/structs", "github.com/gavv/httpexpect", "github.com/gofrs/uuid", + "github.com/gorilla/websocket", "github.com/onsi/ginkgo", "github.com/onsi/ginkgo/extensions/table", "github.com/onsi/gomega", "github.com/onsi/gomega/ghttp", "github.com/patrickmn/go-cache", "github.com/pkg/errors", - "github.com/robfig/cron", "github.com/sirupsen/logrus", "github.com/spf13/cast", "github.com/spf13/pflag", + "github.com/tidwall/gjson", + "github.com/tidwall/sjson", ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 728ebfa3..5816317b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -19,7 +19,7 @@ [[constraint]] name = "github.com/sirupsen/logrus" - version = "=1.0.4" + version = "=1.4.1" [[constraint]] name = "github.com/onsi/ginkgo" @@ -43,7 +43,7 @@ [[constraint]] name = "github.com/Peripli/service-manager" - version = "=0.2.3" + version = "=0.3.0" # Refer to issue https://github.com/golang/dep/issues/1799 [[override]] @@ -53,3 +53,19 @@ [[constraint]] name = "github.com/patrickmn/go-cache" version = "2.1.0" + +[[constraint]] + name = "github.com/gorilla/websocket" + version = "1.4.0" + +[[constraint]] + name = "github.com/tidwall/gjson" + version = "v1.1.3" + +[[constraint]] + name = "github.com/tidwall/sjson" + version = "v1.0.3" + +[[constraint]] + name = "github.com/gofrs/uuid" + version = "=3.1.0" diff --git a/pkg/platform/broker_client.go b/pkg/platform/broker_client.go index f23b8e87..8d45c00e 100644 --- a/pkg/platform/broker_client.go +++ b/pkg/platform/broker_client.go @@ -16,7 +16,45 @@ package platform -import "context" +import ( + "context" + "encoding/json" + + "github.com/Peripli/service-manager/pkg/types" +) + +// CreateServiceBrokerRequest type used for requests by the platform client +type CreateServiceBrokerRequest struct { + Name string `json:"name"` + BrokerURL string `json:"broker_url"` +} + +// UpdateServiceBrokerRequest type used for requests by the platform client +type UpdateServiceBrokerRequest struct { + GUID string `json:"guid"` + Name string `json:"name"` + BrokerURL string `json:"broker_url"` +} + +// DeleteServiceBrokerRequest type used for requests by the platform client +type DeleteServiceBrokerRequest struct { + GUID string `json:"guid"` + Name string `json:"name"` +} + +// ServiceBroker type for responses from the platform client +type ServiceBroker struct { + GUID string `json:"guid"` + Name string `json:"name"` + BrokerURL string `json:"broker_url"` + ServiceOfferings []types.ServiceOffering `json:"services"` + Metadata map[string]json.RawMessage `json:"metadata"` +} + +// ServiceBrokerList type for responses from the platform client +type 
ServiceBrokerList struct { + ServiceBrokers []ServiceBroker `json:"service_brokers"` +} // BrokerClient provides the logic for calling into the underlying platform and performing platform specific operations //go:generate counterfeiter . BrokerClient @@ -24,6 +62,9 @@ type BrokerClient interface { // GetBrokers obtains the registered brokers in the platform GetBrokers(ctx context.Context) ([]ServiceBroker, error) + // GetBrokerByName returns the broker from the platform with the specified name + GetBrokerByName(ctx context.Context, name string) (*ServiceBroker, error) + // CreateBroker registers a new broker at the platform CreateBroker(ctx context.Context, r *CreateServiceBrokerRequest) (*ServiceBroker, error) diff --git a/pkg/platform/platformfakes/fake_broker_client.go b/pkg/platform/platformfakes/fake_broker_client.go index e4de2a38..28b08d5a 100644 --- a/pkg/platform/platformfakes/fake_broker_client.go +++ b/pkg/platform/platformfakes/fake_broker_client.go @@ -2,10 +2,10 @@ package platformfakes import ( - context "context" - sync "sync" + "context" + "sync" - platform "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/platform" ) type FakeBrokerClient struct { @@ -35,6 +35,20 @@ type FakeBrokerClient struct { deleteBrokerReturnsOnCall map[int]struct { result1 error } + GetBrokerByNameStub func(context.Context, string) (*platform.ServiceBroker, error) + getBrokerByNameMutex sync.RWMutex + getBrokerByNameArgsForCall []struct { + arg1 context.Context + arg2 string + } + getBrokerByNameReturns struct { + result1 *platform.ServiceBroker + result2 error + } + getBrokerByNameReturnsOnCall map[int]struct { + result1 *platform.ServiceBroker + result2 error + } GetBrokersStub func(context.Context) ([]platform.ServiceBroker, error) getBrokersMutex sync.RWMutex getBrokersArgsForCall []struct { @@ -191,6 +205,70 @@ func (fake *FakeBrokerClient) DeleteBrokerReturnsOnCall(i int, result1 error) { }{result1} } +func (fake *FakeBrokerClient) GetBrokerByName(arg1 context.Context, arg2 string) (*platform.ServiceBroker, error) { + fake.getBrokerByNameMutex.Lock() + ret, specificReturn := fake.getBrokerByNameReturnsOnCall[len(fake.getBrokerByNameArgsForCall)] + fake.getBrokerByNameArgsForCall = append(fake.getBrokerByNameArgsForCall, struct { + arg1 context.Context + arg2 string + }{arg1, arg2}) + fake.recordInvocation("GetBrokerByName", []interface{}{arg1, arg2}) + fake.getBrokerByNameMutex.Unlock() + if fake.GetBrokerByNameStub != nil { + return fake.GetBrokerByNameStub(arg1, arg2) + } + if specificReturn { + return ret.result1, ret.result2 + } + fakeReturns := fake.getBrokerByNameReturns + return fakeReturns.result1, fakeReturns.result2 +} + +func (fake *FakeBrokerClient) GetBrokerByNameCallCount() int { + fake.getBrokerByNameMutex.RLock() + defer fake.getBrokerByNameMutex.RUnlock() + return len(fake.getBrokerByNameArgsForCall) +} + +func (fake *FakeBrokerClient) GetBrokerByNameCalls(stub func(context.Context, string) (*platform.ServiceBroker, error)) { + fake.getBrokerByNameMutex.Lock() + defer fake.getBrokerByNameMutex.Unlock() + fake.GetBrokerByNameStub = stub +} + +func (fake *FakeBrokerClient) GetBrokerByNameArgsForCall(i int) (context.Context, string) { + fake.getBrokerByNameMutex.RLock() + defer fake.getBrokerByNameMutex.RUnlock() + argsForCall := fake.getBrokerByNameArgsForCall[i] + return argsForCall.arg1, argsForCall.arg2 +} + +func (fake *FakeBrokerClient) GetBrokerByNameReturns(result1 *platform.ServiceBroker, result2 error) { + 
fake.getBrokerByNameMutex.Lock() + defer fake.getBrokerByNameMutex.Unlock() + fake.GetBrokerByNameStub = nil + fake.getBrokerByNameReturns = struct { + result1 *platform.ServiceBroker + result2 error + }{result1, result2} +} + +func (fake *FakeBrokerClient) GetBrokerByNameReturnsOnCall(i int, result1 *platform.ServiceBroker, result2 error) { + fake.getBrokerByNameMutex.Lock() + defer fake.getBrokerByNameMutex.Unlock() + fake.GetBrokerByNameStub = nil + if fake.getBrokerByNameReturnsOnCall == nil { + fake.getBrokerByNameReturnsOnCall = make(map[int]struct { + result1 *platform.ServiceBroker + result2 error + }) + } + fake.getBrokerByNameReturnsOnCall[i] = struct { + result1 *platform.ServiceBroker + result2 error + }{result1, result2} +} + func (fake *FakeBrokerClient) GetBrokers(arg1 context.Context) ([]platform.ServiceBroker, error) { fake.getBrokersMutex.Lock() ret, specificReturn := fake.getBrokersReturnsOnCall[len(fake.getBrokersArgsForCall)] @@ -325,6 +403,8 @@ func (fake *FakeBrokerClient) Invocations() map[string][][]interface{} { defer fake.createBrokerMutex.RUnlock() fake.deleteBrokerMutex.RLock() defer fake.deleteBrokerMutex.RUnlock() + fake.getBrokerByNameMutex.RLock() + defer fake.getBrokerByNameMutex.RUnlock() fake.getBrokersMutex.RLock() defer fake.getBrokersMutex.RUnlock() fake.updateBrokerMutex.RLock() diff --git a/pkg/platform/platformfakes/fake_catalog_fetcher.go b/pkg/platform/platformfakes/fake_catalog_fetcher.go index fc5b3364..0f6233e0 100644 --- a/pkg/platform/platformfakes/fake_catalog_fetcher.go +++ b/pkg/platform/platformfakes/fake_catalog_fetcher.go @@ -2,10 +2,10 @@ package platformfakes import ( - context "context" - sync "sync" + "context" + "sync" - platform "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/platform" ) type FakeCatalogFetcher struct { diff --git a/pkg/platform/platformfakes/fake_client.go b/pkg/platform/platformfakes/fake_client.go index d882c336..cde031bc 100644 --- a/pkg/platform/platformfakes/fake_client.go +++ b/pkg/platform/platformfakes/fake_client.go @@ -2,9 +2,9 @@ package platformfakes import ( - sync "sync" + "sync" - platform "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/platform" ) type FakeClient struct { diff --git a/pkg/platform/platformfakes/fake_visibility_client.go b/pkg/platform/platformfakes/fake_visibility_client.go index 5ea8434e..b9c19019 100644 --- a/pkg/platform/platformfakes/fake_visibility_client.go +++ b/pkg/platform/platformfakes/fake_visibility_client.go @@ -2,21 +2,18 @@ package platformfakes import ( - context "context" - json "encoding/json" - sync "sync" + "context" + "sync" - platform "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/platform" ) type FakeVisibilityClient struct { - DisableAccessForPlanStub func(context.Context, json.RawMessage, string, string) error + DisableAccessForPlanStub func(context.Context, *platform.ModifyPlanAccessRequest) error disableAccessForPlanMutex sync.RWMutex disableAccessForPlanArgsForCall []struct { arg1 context.Context - arg2 json.RawMessage - arg3 string - arg4 string + arg2 *platform.ModifyPlanAccessRequest } disableAccessForPlanReturns struct { result1 error @@ -24,13 +21,11 @@ type FakeVisibilityClient struct { disableAccessForPlanReturnsOnCall map[int]struct { result1 error } - EnableAccessForPlanStub func(context.Context, json.RawMessage, string, string) error + EnableAccessForPlanStub func(context.Context, 
*platform.ModifyPlanAccessRequest) error enableAccessForPlanMutex sync.RWMutex enableAccessForPlanArgsForCall []struct { arg1 context.Context - arg2 json.RawMessage - arg3 string - arg4 string + arg2 *platform.ModifyPlanAccessRequest } enableAccessForPlanReturns struct { result1 error @@ -38,18 +33,18 @@ type FakeVisibilityClient struct { enableAccessForPlanReturnsOnCall map[int]struct { result1 error } - GetVisibilitiesByBrokersStub func(context.Context, []string) ([]*platform.ServiceVisibilityEntity, error) + GetVisibilitiesByBrokersStub func(context.Context, []string) ([]*platform.Visibility, error) getVisibilitiesByBrokersMutex sync.RWMutex getVisibilitiesByBrokersArgsForCall []struct { arg1 context.Context arg2 []string } getVisibilitiesByBrokersReturns struct { - result1 []*platform.ServiceVisibilityEntity + result1 []*platform.Visibility result2 error } getVisibilitiesByBrokersReturnsOnCall map[int]struct { - result1 []*platform.ServiceVisibilityEntity + result1 []*platform.Visibility result2 error } VisibilityScopeLabelKeyStub func() string @@ -66,19 +61,17 @@ type FakeVisibilityClient struct { invocationsMutex sync.RWMutex } -func (fake *FakeVisibilityClient) DisableAccessForPlan(arg1 context.Context, arg2 json.RawMessage, arg3 string, arg4 string) error { +func (fake *FakeVisibilityClient) DisableAccessForPlan(arg1 context.Context, arg2 *platform.ModifyPlanAccessRequest) error { fake.disableAccessForPlanMutex.Lock() ret, specificReturn := fake.disableAccessForPlanReturnsOnCall[len(fake.disableAccessForPlanArgsForCall)] fake.disableAccessForPlanArgsForCall = append(fake.disableAccessForPlanArgsForCall, struct { arg1 context.Context - arg2 json.RawMessage - arg3 string - arg4 string - }{arg1, arg2, arg3, arg4}) - fake.recordInvocation("DisableAccessForPlan", []interface{}{arg1, arg2, arg3, arg4}) + arg2 *platform.ModifyPlanAccessRequest + }{arg1, arg2}) + fake.recordInvocation("DisableAccessForPlan", []interface{}{arg1, arg2}) fake.disableAccessForPlanMutex.Unlock() if fake.DisableAccessForPlanStub != nil { - return fake.DisableAccessForPlanStub(arg1, arg2, arg3, arg4) + return fake.DisableAccessForPlanStub(arg1, arg2) } if specificReturn { return ret.result1 @@ -93,17 +86,17 @@ func (fake *FakeVisibilityClient) DisableAccessForPlanCallCount() int { return len(fake.disableAccessForPlanArgsForCall) } -func (fake *FakeVisibilityClient) DisableAccessForPlanCalls(stub func(context.Context, json.RawMessage, string, string) error) { +func (fake *FakeVisibilityClient) DisableAccessForPlanCalls(stub func(context.Context, *platform.ModifyPlanAccessRequest) error) { fake.disableAccessForPlanMutex.Lock() defer fake.disableAccessForPlanMutex.Unlock() fake.DisableAccessForPlanStub = stub } -func (fake *FakeVisibilityClient) DisableAccessForPlanArgsForCall(i int) (context.Context, json.RawMessage, string, string) { +func (fake *FakeVisibilityClient) DisableAccessForPlanArgsForCall(i int) (context.Context, *platform.ModifyPlanAccessRequest) { fake.disableAccessForPlanMutex.RLock() defer fake.disableAccessForPlanMutex.RUnlock() argsForCall := fake.disableAccessForPlanArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 + return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeVisibilityClient) DisableAccessForPlanReturns(result1 error) { @@ -129,19 +122,17 @@ func (fake *FakeVisibilityClient) DisableAccessForPlanReturnsOnCall(i int, resul }{result1} } -func (fake *FakeVisibilityClient) EnableAccessForPlan(arg1 context.Context, arg2 json.RawMessage, 
arg3 string, arg4 string) error { +func (fake *FakeVisibilityClient) EnableAccessForPlan(arg1 context.Context, arg2 *platform.ModifyPlanAccessRequest) error { fake.enableAccessForPlanMutex.Lock() ret, specificReturn := fake.enableAccessForPlanReturnsOnCall[len(fake.enableAccessForPlanArgsForCall)] fake.enableAccessForPlanArgsForCall = append(fake.enableAccessForPlanArgsForCall, struct { arg1 context.Context - arg2 json.RawMessage - arg3 string - arg4 string - }{arg1, arg2, arg3, arg4}) - fake.recordInvocation("EnableAccessForPlan", []interface{}{arg1, arg2, arg3, arg4}) + arg2 *platform.ModifyPlanAccessRequest + }{arg1, arg2}) + fake.recordInvocation("EnableAccessForPlan", []interface{}{arg1, arg2}) fake.enableAccessForPlanMutex.Unlock() if fake.EnableAccessForPlanStub != nil { - return fake.EnableAccessForPlanStub(arg1, arg2, arg3, arg4) + return fake.EnableAccessForPlanStub(arg1, arg2) } if specificReturn { return ret.result1 @@ -156,17 +147,17 @@ func (fake *FakeVisibilityClient) EnableAccessForPlanCallCount() int { return len(fake.enableAccessForPlanArgsForCall) } -func (fake *FakeVisibilityClient) EnableAccessForPlanCalls(stub func(context.Context, json.RawMessage, string, string) error) { +func (fake *FakeVisibilityClient) EnableAccessForPlanCalls(stub func(context.Context, *platform.ModifyPlanAccessRequest) error) { fake.enableAccessForPlanMutex.Lock() defer fake.enableAccessForPlanMutex.Unlock() fake.EnableAccessForPlanStub = stub } -func (fake *FakeVisibilityClient) EnableAccessForPlanArgsForCall(i int) (context.Context, json.RawMessage, string, string) { +func (fake *FakeVisibilityClient) EnableAccessForPlanArgsForCall(i int) (context.Context, *platform.ModifyPlanAccessRequest) { fake.enableAccessForPlanMutex.RLock() defer fake.enableAccessForPlanMutex.RUnlock() argsForCall := fake.enableAccessForPlanArgsForCall[i] - return argsForCall.arg1, argsForCall.arg2, argsForCall.arg3, argsForCall.arg4 + return argsForCall.arg1, argsForCall.arg2 } func (fake *FakeVisibilityClient) EnableAccessForPlanReturns(result1 error) { @@ -192,7 +183,7 @@ func (fake *FakeVisibilityClient) EnableAccessForPlanReturnsOnCall(i int, result }{result1} } -func (fake *FakeVisibilityClient) GetVisibilitiesByBrokers(arg1 context.Context, arg2 []string) ([]*platform.ServiceVisibilityEntity, error) { +func (fake *FakeVisibilityClient) GetVisibilitiesByBrokers(arg1 context.Context, arg2 []string) ([]*platform.Visibility, error) { var arg2Copy []string if arg2 != nil { arg2Copy = make([]string, len(arg2)) @@ -222,7 +213,7 @@ func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersCallCount() int { return len(fake.getVisibilitiesByBrokersArgsForCall) } -func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersCalls(stub func(context.Context, []string) ([]*platform.ServiceVisibilityEntity, error)) { +func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersCalls(stub func(context.Context, []string) ([]*platform.Visibility, error)) { fake.getVisibilitiesByBrokersMutex.Lock() defer fake.getVisibilitiesByBrokersMutex.Unlock() fake.GetVisibilitiesByBrokersStub = stub @@ -235,28 +226,28 @@ func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersArgsForCall(i int) (co return argsForCall.arg1, argsForCall.arg2 } -func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersReturns(result1 []*platform.ServiceVisibilityEntity, result2 error) { +func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersReturns(result1 []*platform.Visibility, result2 error) { fake.getVisibilitiesByBrokersMutex.Lock() defer 
fake.getVisibilitiesByBrokersMutex.Unlock() fake.GetVisibilitiesByBrokersStub = nil fake.getVisibilitiesByBrokersReturns = struct { - result1 []*platform.ServiceVisibilityEntity + result1 []*platform.Visibility result2 error }{result1, result2} } -func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersReturnsOnCall(i int, result1 []*platform.ServiceVisibilityEntity, result2 error) { +func (fake *FakeVisibilityClient) GetVisibilitiesByBrokersReturnsOnCall(i int, result1 []*platform.Visibility, result2 error) { fake.getVisibilitiesByBrokersMutex.Lock() defer fake.getVisibilitiesByBrokersMutex.Unlock() fake.GetVisibilitiesByBrokersStub = nil if fake.getVisibilitiesByBrokersReturnsOnCall == nil { fake.getVisibilitiesByBrokersReturnsOnCall = make(map[int]struct { - result1 []*platform.ServiceVisibilityEntity + result1 []*platform.Visibility result2 error }) } fake.getVisibilitiesByBrokersReturnsOnCall[i] = struct { - result1 []*platform.ServiceVisibilityEntity + result1 []*platform.Visibility result2 error }{result1, result2} } diff --git a/pkg/platform/types.go b/pkg/platform/types.go deleted file mode 100644 index b9dde91d..00000000 --- a/pkg/platform/types.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2018 The Service Manager Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package platform - -import ( - "encoding/json" - - "github.com/Peripli/service-manager/pkg/types" -) - -// CreateServiceBrokerRequest type used for requests by the platform client -type CreateServiceBrokerRequest struct { - Name string `json:"name"` - BrokerURL string `json:"broker_url"` -} - -// UpdateServiceBrokerRequest type used for requests by the platform client -type UpdateServiceBrokerRequest struct { - GUID string `json:"guid"` - Name string `json:"name"` - BrokerURL string `json:"broker_url"` -} - -// DeleteServiceBrokerRequest type used for requests by the platform client -type DeleteServiceBrokerRequest struct { - GUID string `json:"guid"` - Name string `json:"name"` -} - -// ServiceBroker type for responses from the platform client -type ServiceBroker struct { - GUID string `json:"guid"` - Name string `json:"name"` - BrokerURL string `json:"broker_url"` - ServiceOfferings []types.ServiceOffering `json:"services"` - Metadata map[string]json.RawMessage `json:"metadata"` -} - -// ServiceBrokerList type for responses from the platform client -type ServiceBrokerList struct { - ServiceBrokers []ServiceBroker `json:"service_brokers"` -} - -// ServiceVisibilityEntity generic visibility entity -type ServiceVisibilityEntity struct { - Public bool - CatalogPlanID string - PlatformBrokerName string - Labels map[string]string -} diff --git a/pkg/platform/visibility_client.go b/pkg/platform/visibility_client.go index d6d9dfd7..905884b4 100644 --- a/pkg/platform/visibility_client.go +++ b/pkg/platform/visibility_client.go @@ -18,24 +18,38 @@ package platform import ( "context" - "encoding/json" + + "github.com/Peripli/service-manager/pkg/types" ) +// Visibility generic visibility entity +type Visibility struct { + Public bool + CatalogPlanID string + PlatformBrokerName string + Labels map[string]string +} + +// ModifyPlanAccessRequest type used for requests by the platform client +type ModifyPlanAccessRequest struct { + BrokerName string `json:"broker_name"` + CatalogPlanID string `json:"catalog_plan_id"` + Labels types.Labels `json:"labels"` +} + // VisibilityClient interface for platform clients to implement if they support // platform specific service and plan visibilities //go:generate counterfeiter . 
VisibilityClient type VisibilityClient interface { // GetVisibilitiesByBrokers get currently available visibilities in the platform for specific broker names - GetVisibilitiesByBrokers(context.Context, []string) ([]*ServiceVisibilityEntity, error) + GetVisibilitiesByBrokers(context.Context, []string) ([]*Visibility, error) // VisibilityScopeLabelKey returns a specific label key which should be used when converting SM visibilities to platform.Visibilities VisibilityScopeLabelKey() string - // EnableAccessForPlan enables the access to the plan with the specified GUID for - // the entities in the data - EnableAccessForPlan(ctx context.Context, data json.RawMessage, catalogPlanID, platformBrokerName string) error + // EnableAccessForPlan enables the access for the specified plan + EnableAccessForPlan(ctx context.Context, request *ModifyPlanAccessRequest) error - // DisableAccessForPlan disables the access to the plan with the specified GUID for - // the entities in the data - DisableAccessForPlan(ctx context.Context, data json.RawMessage, catalogPlanID, platformBrokerName string) error + // DisableAccessForPlan disables the access for the specified plan + DisableAccessForPlan(ctx context.Context, request *ModifyPlanAccessRequest) error } diff --git a/pkg/sbproxy/notifications/consumer.go b/pkg/sbproxy/notifications/consumer.go new file mode 100644 index 00000000..9fab2aad --- /dev/null +++ b/pkg/sbproxy/notifications/consumer.go @@ -0,0 +1,55 @@ +package notifications + +import ( + "context" + "encoding/json" + + "github.com/Peripli/service-manager/pkg/log" + "github.com/gofrs/uuid" + + "github.com/Peripli/service-manager/pkg/types" +) + +// ResourceNotificationHandler can handle notifications by processing the Payload +type ResourceNotificationHandler interface { + // OnCreate is called when a notification for creating a resource arrives + OnCreate(ctx context.Context, payload json.RawMessage) + + // OnUpdate is called when a notification for modifying a resource arrives + OnUpdate(ctx context.Context, payload json.RawMessage) + + // OnDelete is called when a notification for deleting a resource arrives + OnDelete(ctx context.Context, payload json.RawMessage) +} + +// Consumer allows consuming notifications by picking the correct handler to process it +type Consumer struct { + Handlers map[types.ObjectType]ResourceNotificationHandler +} + +// Consume consumes a notification and passes it to the correct handler for further processing +func (c *Consumer) Consume(ctx context.Context, n *types.Notification) { + notificationHandler, found := c.Handlers[n.Resource] + + if !found { + log.C(ctx).Warnf("No notification handler found for notification for resource %s. 
Ignoring notification...", n.Resource) + return + } + + correlationID, err := uuid.NewV4() + if err != nil { + log.C(ctx).Warnf("could not generate correlationID: %s", err) + } else { + entry := log.C(ctx).WithField(log.FieldCorrelationID, correlationID.String()) + ctx = log.ContextWithLogger(ctx, entry) + } + + switch n.Type { + case types.CREATED: + notificationHandler.OnCreate(ctx, n.Payload) + case types.MODIFIED: + notificationHandler.OnUpdate(ctx, n.Payload) + case types.DELETED: + notificationHandler.OnDelete(ctx, n.Payload) + } +} diff --git a/pkg/sbproxy/notifications/handlers/broker_handler.go b/pkg/sbproxy/notifications/handlers/broker_handler.go new file mode 100644 index 00000000..80ba97e6 --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/broker_handler.go @@ -0,0 +1,245 @@ +package handlers + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/Peripli/service-manager/storage/interceptors" + + "github.com/Peripli/service-manager/pkg/log" + + "github.com/Peripli/service-manager/pkg/types" + + "github.com/Peripli/service-broker-proxy/pkg/platform" +) + +type brokerPayload struct { + New brokerWithAdditionalDetails `json:"new"` + Old brokerWithAdditionalDetails `json:"old"` +} + +type brokerWithAdditionalDetails struct { + Resource *types.ServiceBroker `json:"resource"` + Additional interceptors.BrokerAdditional `json:"additional"` +} + +// Validate validates the broker payload +func (bp brokerPayload) Validate(op types.OperationType) error { + switch op { + case types.CREATED: + if err := bp.New.Validate(); err != nil { + return err + } + case types.MODIFIED: + if err := bp.Old.Validate(); err != nil { + return err + } + if err := bp.New.Validate(); err != nil { + return err + } + case types.DELETED: + if err := bp.Old.Validate(); err != nil { + return err + } + } + + return nil +} + +// Validate validates the broker and its additional details +func (bad brokerWithAdditionalDetails) Validate() error { + if bad.Resource == nil { + return fmt.Errorf("resource in notification payload cannot be nil") + } + if bad.Resource.ID == "" { + return fmt.Errorf("broker ID cannot be empty") + } + if bad.Resource.BrokerURL == "" { + return fmt.Errorf("broker URL cannot be empty") + } + if bad.Resource.Name == "" { + return fmt.Errorf("broker name cannot be empty") + } + + return bad.Additional.Validate() +} + +// BrokerResourceNotificationsHandler handles notifications for brokers +type BrokerResourceNotificationsHandler struct { + BrokerClient platform.BrokerClient + CatalogFetcher platform.CatalogFetcher + + ProxyPrefix string + ProxyPath string +} + +// OnCreate creates brokers from the specified notification payload by invoking the proper platform clients +func (bnh *BrokerResourceNotificationsHandler) OnCreate(ctx context.Context, payload json.RawMessage) { + log.C(ctx).Debugf("Processing broker create notification with payload %s...", string(payload)) + + brokerPayload := brokerPayload{} + if err := json.Unmarshal(payload, &brokerPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling broker create notification payload") + return + } + + if err := brokerPayload.Validate(types.CREATED); err != nil { + log.C(ctx).WithError(err).Error("error validating broker payload") + return + } + + brokerToCreate := brokerPayload.New + brokerProxyPath := bnh.brokerProxyPath(brokerToCreate.Resource) + brokerProxyName := bnh.brokerProxyName(brokerToCreate.Resource) + + log.C(ctx).Infof("Attempting to find platform broker with name %s in platform...", 
brokerToCreate.Resource.Name) + + existingBroker, err := bnh.BrokerClient.GetBrokerByName(ctx, brokerToCreate.Resource.Name) + if err != nil { + log.C(ctx).Warnf("Could not find platform broker in platform with name %s: %s", brokerToCreate.Resource.Name, err) + } + + if existingBroker == nil { + log.C(ctx).Infof("Could not find platform broker in platform with name %s. Attempting to create a SM proxy registration...", brokerProxyName) + + createRequest := &platform.CreateServiceBrokerRequest{ + Name: brokerProxyName, + BrokerURL: brokerProxyPath, + } + if _, err := bnh.BrokerClient.CreateBroker(ctx, createRequest); err != nil { + log.C(ctx).WithError(err).Errorf("error creating broker with name %s and URL %s", createRequest.Name, createRequest.BrokerURL) + return + } + log.C(ctx).Infof("Successfully created SM proxy registration in platform for broker with name %s", brokerProxyName) + } else { + log.C(ctx).Infof("Successfully found broker in platform with name %s and URL %s. Checking if proxification is needed...", existingBroker.Name, existingBroker.BrokerURL) + if shouldBeProxified(existingBroker, brokerToCreate.Resource) { + updateRequest := &platform.UpdateServiceBrokerRequest{ + GUID: existingBroker.GUID, + Name: brokerProxyName, + BrokerURL: brokerProxyPath, + } + + log.C(ctx).Infof("Proxifying platform broker with name %s and URL %s...", existingBroker.Name, existingBroker.BrokerURL) + if _, err := bnh.BrokerClient.UpdateBroker(ctx, updateRequest); err != nil { + log.C(ctx).WithError(err).Errorf("error proxifying platform broker with GUID %s with SM broker with id %s", existingBroker.GUID, brokerToCreate.Resource.GetID()) + return + } + } else { + log.C(ctx).Errorf("conflict error: existing platform broker with name %s and URL %s CANNOT be proxified to SM broker with URL %s. The URLs need to be the same", existingBroker.Name, existingBroker.BrokerURL, brokerToCreate.Resource.BrokerURL) + } + } +} + +// OnUpdate modifies brokers from the specified notification payload by invoking the proper platform clients +func (bnh *BrokerResourceNotificationsHandler) OnUpdate(ctx context.Context, payload json.RawMessage) { + log.C(ctx).Debugf("Processing broker update notification with payload %s...", string(payload)) + + brokerPayload := brokerPayload{} + if err := json.Unmarshal(payload, &brokerPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling broker create notification payload") + return + } + + if err := brokerPayload.Validate(types.MODIFIED); err != nil { + log.C(ctx).WithError(err).Error("error validating broker payload") + return + } + + brokerAfterUpdate := brokerPayload.New + brokerProxyName := bnh.brokerProxyName(brokerAfterUpdate.Resource) + brokerProxyPath := bnh.brokerProxyPath(brokerAfterUpdate.Resource) + + log.C(ctx).Infof("Attempting to find platform broker with name %s in platform...", brokerProxyName) + + existingBroker, err := bnh.BrokerClient.GetBrokerByName(ctx, brokerProxyName) + if err != nil { + log.C(ctx).Warnf("Could not find broker with name %s in the platform: %s. No update will be attempted", brokerProxyName, err) + return + } + + if existingBroker.BrokerURL != brokerProxyPath { + log.C(ctx).Warnf("Platform broker with name %s has an URL %s and is not proxified by SM. No update will be attempted", brokerProxyName, existingBroker.BrokerURL) + return + } + + log.C(ctx).Infof("Successfully found platform broker with name %s and URL %s. 
Refetching catalog...", existingBroker.Name, existingBroker.BrokerURL) + + fetchCatalogRequest := &platform.ServiceBroker{ + GUID: existingBroker.GUID, + Name: brokerProxyName, + BrokerURL: brokerProxyPath, + } + if bnh.CatalogFetcher != nil { + if err := bnh.CatalogFetcher.Fetch(ctx, fetchCatalogRequest); err != nil { + log.C(ctx).WithError(err).Errorf("error during fetching catalog for platform guid %s and sm id %s", fetchCatalogRequest.GUID, brokerAfterUpdate.Resource.ID) + return + } + } + log.C(ctx).Infof("Successfully refetched catalog for platform broker with name %s and URL %s", existingBroker.Name, existingBroker.BrokerURL) + +} + +// OnDelete deletes brokers from the provided notification payload by invoking the proper platform clients +func (bnh *BrokerResourceNotificationsHandler) OnDelete(ctx context.Context, payload json.RawMessage) { + log.C(ctx).Debugf("Processing broker delete notification with payload %s...", string(payload)) + + brokerPayload := brokerPayload{} + if err := json.Unmarshal(payload, &brokerPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling broker create notification payload") + return + } + + if err := brokerPayload.Validate(types.DELETED); err != nil { + log.C(ctx).WithError(err).Error("error validating broker payload") + return + } + + brokerToDelete := brokerPayload.Old + brokerProxyName := bnh.brokerProxyName(brokerToDelete.Resource) + brokerProxyPath := bnh.brokerProxyPath(brokerToDelete.Resource) + + log.C(ctx).Infof("Attempting to find platform broker with name %s in platform...", brokerProxyName) + + existingBroker, err := bnh.BrokerClient.GetBrokerByName(ctx, brokerProxyName) + if err != nil { + log.C(ctx).Warnf("Could not find broker with ID %s in the platform: %s", brokerToDelete.Resource.ID, err) + } + + if existingBroker == nil { + log.C(ctx).Warnf("Could not find broker with ID %s in the platform. No deletion will be attempted", brokerToDelete.Resource.ID) + return + } + + if existingBroker.BrokerURL != brokerProxyPath { + log.C(ctx).Warnf("Could not find proxified broker with ID %s in the platform. No deletion will be attempted", brokerToDelete.Resource.ID) + return + } + + log.C(ctx).Infof("Successfully found platform broker with name %s and URL %s. 
Attempting to delete...", existingBroker.Name, existingBroker.BrokerURL) + + deleteRequest := &platform.DeleteServiceBrokerRequest{ + GUID: existingBroker.GUID, + Name: brokerProxyName, + } + + if err := bnh.BrokerClient.DeleteBroker(ctx, deleteRequest); err != nil { + log.C(ctx).WithError(err).Errorf("error deleting broker with id %s name %s", deleteRequest.GUID, deleteRequest.Name) + return + } + log.C(ctx).Infof("Successfully deleted platform broker with platform ID %s and name %s", existingBroker.GUID, existingBroker.Name) +} + +func (bnh *BrokerResourceNotificationsHandler) brokerProxyPath(broker *types.ServiceBroker) string { + return bnh.ProxyPath + "/" + broker.GetID() +} + +func (bnh *BrokerResourceNotificationsHandler) brokerProxyName(broker *types.ServiceBroker) string { + return bnh.ProxyPrefix + broker.Name +} + +func shouldBeProxified(brokerFromPlatform *platform.ServiceBroker, brokerFromSM *types.ServiceBroker) bool { + return brokerFromPlatform.BrokerURL == brokerFromSM.BrokerURL && + brokerFromPlatform.Name == brokerFromSM.Name +} diff --git a/pkg/sbproxy/notifications/handlers/broker_handler_test.go b/pkg/sbproxy/notifications/handlers/broker_handler_test.go new file mode 100644 index 00000000..c78116f5 --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/broker_handler_test.go @@ -0,0 +1,615 @@ +package handlers_test + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/tidwall/sjson" + + "github.com/Peripli/service-broker-proxy/pkg/platform" + . "github.com/onsi/gomega" + + "github.com/Peripli/service-broker-proxy/pkg/platform/platformfakes" + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications/handlers" + . "github.com/onsi/ginkgo" +) + +var _ = Describe("Broker Handler", func() { + var ctx context.Context + + var fakeCatalogFetcher *platformfakes.FakeCatalogFetcher + var fakeBrokerClient *platformfakes.FakeBrokerClient + + var brokerHandler *handlers.BrokerResourceNotificationsHandler + + var brokerNotificationPayload string + var brokerName string + var brokerURL string + + var smBrokerID string + var catalog string + + var err error + + BeforeEach(func() { + ctx = context.TODO() + + smBrokerID = "brokerID" + brokerName = "brokerName" + brokerURL = "brokerURL" + + catalog = `{ + "services": [ + { + "name": "another-fake-service", + "id": "another-fake-service-id", + "description": "test-description", + "requires": ["another-route_forwarding"], + "tags": ["another-no-sql", "another-relational"], + "bindable": true, + "instances_retrievable": true, + "bindings_retrievable": true, + "metadata": { + "provider": { + "name": "another name" + }, + "listing": { + "imageUrl": "http://example.com/cat.gif", + "blurb": "another blurb here", + "longDescription": "A long time ago, in a another galaxy far far away..." 
+ }, + "displayName": "another Fake Service Broker" + }, + "plan_updateable": true, + "plans": [] + } + ] + }` + + fakeCatalogFetcher = &platformfakes.FakeCatalogFetcher{} + fakeBrokerClient = &platformfakes.FakeBrokerClient{} + + brokerHandler = &handlers.BrokerResourceNotificationsHandler{ + BrokerClient: fakeBrokerClient, + CatalogFetcher: fakeCatalogFetcher, + ProxyPrefix: "proxyPrefix", + ProxyPath: "proxyPath", + } + }) + + Describe("OnCreate", func() { + BeforeEach(func() { + brokerNotificationPayload = fmt.Sprintf(` + { + "new": { + "resource": { + "id": "%s", + "name": "%s", + "broker_url": "%s", + "description": "brokerDescription", + "labels": { + "key1": ["value1", "value2"], + "key2": ["value3", "value4"] + } + }, + "additional": %s + } + }`, smBrokerID, brokerName, brokerURL, catalog) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + brokerNotificationPayload = `randomString` + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when notification payload is invalid", func() { + Context("when new resource is missing", func() { + BeforeEach(func() { + brokerNotificationPayload = `{"randomKey":"randomValue"}` + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when the resource ID is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "new.resource.id") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when the resource name is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "new.resource.name") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when the resource URL is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "new.resource.broker_url") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, 
json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when additional services are empty", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "new.additional.services") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + }) + + Context("when getting broker by name from the platform returns an error", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(nil, fmt.Errorf("error")) + }) + + It("does try to create and not update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(1)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when a broker with the same name and URL exists in the platform", func() { + var expectedUpdateBrokerRequest *platform.UpdateServiceBrokerRequest + + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerName, + BrokerURL: brokerURL, + }, nil) + + expectedUpdateBrokerRequest = &platform.UpdateServiceBrokerRequest{ + GUID: smBrokerID, + Name: brokerHandler.ProxyPrefix + brokerName, + BrokerURL: brokerHandler.ProxyPath + "/" + smBrokerID, + } + + fakeBrokerClient.UpdateBrokerReturns(nil, fmt.Errorf("error")) + }) + + It("invokes update broker with the correct arguments", func() { + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(1)) + + callCtx, callRequest := fakeBrokerClient.UpdateBrokerArgsForCall(0) + + Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedUpdateBrokerRequest)) + }) + }) + + Context("when a broker with the same name and URL does not exist in the platform", func() { + Context("when an error occurs", func() { + BeforeEach(func() { + fakeBrokerClient.CreateBrokerReturns(nil, fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedCreateBrokerRequest *platform.CreateServiceBrokerRequest + + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(nil, nil) + + expectedCreateBrokerRequest = &platform.CreateServiceBrokerRequest{ + Name: brokerHandler.ProxyPrefix + brokerName, + BrokerURL: brokerHandler.ProxyPath + "/" + smBrokerID, + } + + fakeBrokerClient.CreateBrokerReturns(nil, nil) + + }) + + It("invokes create broker with the correct arguments", func() { + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + brokerHandler.OnCreate(ctx, 
json.RawMessage(brokerNotificationPayload)) + + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(1)) + + callCtx, callRequest := fakeBrokerClient.CreateBrokerArgsForCall(0) + + Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedCreateBrokerRequest)) + }) + }) + }) + + Context("when a broker with the same name and different URL exists in the platform", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerName, + BrokerURL: "randomURL", + }, nil) + + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + brokerHandler.OnCreate(ctx, json.RawMessage(brokerNotificationPayload)) + }) + }) + }) + }) + + Describe("OnUpdate", func() { + BeforeEach(func() { + brokerNotificationPayload = fmt.Sprintf(` + { + "old": { + "resource": { + "id": "%s", + "name": "%s", + "broker_url": "%s", + "description": "brokerDescription", + "labels": { + "key1": ["value1", "value2"], + "key2": ["value3", "value4"] + } + }, + "additional": %s + }, + "new": { + "resource": { + "id": "%s", + "name": "%s", + "broker_url": "%s", + "description": "brokerDescription", + "labels": { + "key1": ["value1", "value2"], + "key2": ["value3", "value4"], + "key3": ["value5", "value6"] + } + }, + "additional": %s + }, + "label_changes": { + "op": "add", + "key": "key3", + "values": ["value5", "value6"] + } + }`, smBrokerID, brokerName, brokerURL, catalog, smBrokerID, brokerName, brokerURL, catalog) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + brokerNotificationPayload = `randomString` + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(len(fakeBrokerClient.Invocations())).To(Equal(0)) + }) + }) + + Context("when old resource is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "old") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when new resource is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "new") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when getting broker by name from the platform returns an error", func() { + 
BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(nil, fmt.Errorf("error")) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when a broker with the same name and URL exists in the platform", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerName, + BrokerURL: brokerURL, + }, nil) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when a proxy registration for the SM broker exists in the platform", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerHandler.ProxyPrefix + smBrokerID, + BrokerURL: brokerHandler.ProxyPath + "/" + smBrokerID, + }, nil) + + }) + + Context("when an error occurs", func() { + BeforeEach(func() { + fakeCatalogFetcher.FetchReturns(fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedUpdateBrokerRequest *platform.ServiceBroker + + BeforeEach(func() { + expectedUpdateBrokerRequest = &platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerHandler.ProxyPrefix + brokerName, + BrokerURL: brokerHandler.ProxyPath + "/" + smBrokerID, + } + + fakeCatalogFetcher.FetchReturns(nil) + }) + + It("fetches the catalog and does not try to update/overtake the platform broker", func() { + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + brokerHandler.OnUpdate(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(1)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + + callCtx, callRequest := fakeCatalogFetcher.FetchArgsForCall(0) + + Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedUpdateBrokerRequest)) + }) + }) + }) + }) + + Describe("OnDelete", func() { + BeforeEach(func() { + brokerNotificationPayload = fmt.Sprintf(` + { + "old": { + "resource": { + "id": "%s", + "name": "%s", + "broker_url": "%s", + "description": "brokerDescription", + "labels": { + "key1": ["value1", "value2"], + "key2": ["value3", "value4"] + } + }, + "additional": %s + } + }`, smBrokerID, brokerName, brokerURL, catalog) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + brokerNotificationPayload = `randomString` + }) + + It("does not try to create or update broker", func() { + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + 
Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when notification payload is invalid", func() { + Context("when old resource is missing", func() { + BeforeEach(func() { + brokerNotificationPayload, err = sjson.Delete(brokerNotificationPayload, "old") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + }) + + Context("when getting broker by name from the platform returns an error", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(nil, fmt.Errorf("error")) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when a broker with the same name and URL does not exist in the platform", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: "randomName", + BrokerURL: "randomURL", + }, nil) + }) + + It("does not try to create, update or delete broker", func() { + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeCatalogFetcher.FetchCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.CreateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.UpdateBrokerCallCount()).To(Equal(0)) + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + }) + }) + + Context("when a broker with the same name and URL exists in the platform", func() { + BeforeEach(func() { + fakeBrokerClient.GetBrokerByNameReturns(&platform.ServiceBroker{ + GUID: smBrokerID, + Name: brokerHandler.ProxyPrefix + brokerName, + BrokerURL: brokerHandler.ProxyPath + "/" + smBrokerID, + }, nil) + }) + + Context("when an error occurs", func() { + BeforeEach(func() { + fakeBrokerClient.DeleteBrokerReturns(fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedDeleteBrokerRequest *platform.DeleteServiceBrokerRequest + + BeforeEach(func() { + expectedDeleteBrokerRequest = &platform.DeleteServiceBrokerRequest{ + GUID: smBrokerID, + Name: brokerHandler.ProxyPrefix + brokerName, + } + + fakeBrokerClient.DeleteBrokerReturns(nil) + }) + + It("invokes delete broker with the correct arguments", func() { + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(0)) + brokerHandler.OnDelete(ctx, json.RawMessage(brokerNotificationPayload)) + + Expect(fakeBrokerClient.DeleteBrokerCallCount()).To(Equal(1)) + + callCtx, callRequest := fakeBrokerClient.DeleteBrokerArgsForCall(0) + + Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedDeleteBrokerRequest)) + }) + }) + }) + }) +}) diff --git a/pkg/sbproxy/notifications/handlers/handlers_suite_test.go b/pkg/sbproxy/notifications/handlers/handlers_suite_test.go new file mode 100644 index 
00000000..e9617601 --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/handlers_suite_test.go @@ -0,0 +1,24 @@ +package handlers_test + +import ( + "testing" + + "github.com/Peripli/service-manager/test/testutil" + "github.com/sirupsen/logrus" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +func TestHandlers(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Handlers Suite") +} + +func VerifyErrorLogged(f func()) { + hook := &testutil.LogInterceptor{} + logrus.AddHook(hook) + Expect(hook).ToNot(ContainSubstring("error")) + f() + Expect(hook).To(ContainSubstring("error")) +} diff --git a/pkg/sbproxy/notifications/handlers/helpers.go b/pkg/sbproxy/notifications/handlers/helpers.go new file mode 100644 index 00000000..a7c1e042 --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/helpers.go @@ -0,0 +1,25 @@ +package handlers + +import ( + "github.com/Peripli/service-manager/pkg/query" + "github.com/Peripli/service-manager/pkg/types" +) + +// LabelChangesToLabels transforms the specified label changes into two groups of labels - one for creation and one for deletion +func LabelChangesToLabels(changes query.LabelChanges) (types.Labels, types.Labels) { + labelsToAdd, labelsToRemove := types.Labels{}, types.Labels{} + for _, change := range changes { + switch change.Operation { + case query.AddLabelOperation: + fallthrough + case query.AddLabelValuesOperation: + labelsToAdd[change.Key] = append(labelsToAdd[change.Key], change.Values...) + case query.RemoveLabelOperation: + fallthrough + case query.RemoveLabelValuesOperation: + labelsToRemove[change.Key] = append(labelsToRemove[change.Key], change.Values...) + } + } + + return labelsToAdd, labelsToRemove +} diff --git a/pkg/sbproxy/notifications/handlers/helpers_test.go b/pkg/sbproxy/notifications/handlers/helpers_test.go new file mode 100644 index 00000000..30c54034 --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/helpers_test.go @@ -0,0 +1,81 @@ +package handlers_test + +import ( + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications/handlers" + "github.com/Peripli/service-manager/pkg/query" + "github.com/Peripli/service-manager/pkg/types" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Handlers Helpers", func() { + Describe("LabelChangesToLabels", func() { + Context("for changes with add and remove operations", func() { + var labelChanges query.LabelChanges + var expectedLabelsToAdd types.Labels + var expectedLabelsToRemove types.Labels + + BeforeEach(func() { + labelChanges = query.LabelChanges{ + &query.LabelChange{ + Operation: query.AddLabelOperation, + Key: "organization_guid", + Values: []string{ + "org1", + "org2", + }, + }, + &query.LabelChange{ + Operation: query.AddLabelValuesOperation, + Key: "organization_guid", + Values: []string{ + "org3", + "org4", + }, + }, + &query.LabelChange{ + Operation: query.RemoveLabelValuesOperation, + Key: "organization_guid", + Values: []string{ + "org5", + "org6", + }, + }, + &query.LabelChange{ + Operation: query.RemoveLabelOperation, + Key: "organization_guid", + Values: []string{ + "org7", + "org8", + }, + }, + } + + expectedLabelsToAdd = types.Labels{ + "organization_guid": { + "org1", + "org2", + "org3", + "org4", + }, + } + + expectedLabelsToRemove = types.Labels{ + "organization_guid": { + "org5", + "org6", + "org7", + "org8", + }, + } + }) + + It("generates correct labels", func() { + labelsToAdd, labelsToRemove := handlers.LabelChangesToLabels(labelChanges) + + Expect(labelsToAdd).To(Equal(expectedLabelsToAdd)) + Expect(labelsToRemove).To(Equal(expectedLabelsToRemove)) + }) + }) + }) +}) diff --git a/pkg/sbproxy/notifications/handlers/visibilities_handler.go b/pkg/sbproxy/notifications/handlers/visibilities_handler.go new file mode 100644 index 00000000..0a98946c --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/visibilities_handler.go @@ -0,0 +1,227 @@ +package handlers + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/Peripli/service-manager/storage/interceptors" + + "github.com/Peripli/service-manager/pkg/query" + + "github.com/Peripli/service-manager/pkg/log" + + "github.com/Peripli/service-manager/pkg/types" + + "github.com/Peripli/service-broker-proxy/pkg/platform" +) + +type visibilityPayload struct { + New visibilityWithAdditionalDetails `json:"new"` + Old visibilityWithAdditionalDetails `json:"old"` + LabelChanges query.LabelChanges `json:"label_changes"` +} + +type visibilityWithAdditionalDetails struct { + Resource *types.Visibility `json:"resource"` + Additional interceptors.VisibilityAdditional `json:"additional"` +} + +// Validate validates the visibility payload +func (vp visibilityPayload) Validate(op types.OperationType) error { + switch op { + case types.CREATED: + if err := vp.New.Validate(); err != nil { + return err + } + case types.MODIFIED: + if err := vp.Old.Validate(); err != nil { + return err + } + if err := vp.New.Validate(); err != nil { + return err + } + case types.DELETED: + if err := vp.Old.Validate(); err != nil { + return err + } + } + + return nil +} + +// Validate validates the visibility details +func (vwad visibilityWithAdditionalDetails) Validate() error { + if vwad.Resource == nil { + return fmt.Errorf("resource in notification payload cannot be nil") + } + + if vwad.Resource.ID == "" { + return fmt.Errorf("visibility id cannot be empty") + } + + if vwad.Resource.ServicePlanID == "" { + return fmt.Errorf("visibility service plan id cannot be empty") + } + + return vwad.Additional.Validate() +} + +// VisibilityResourceNotificationsHandler handles notifications for visibilities +type VisibilityResourceNotificationsHandler struct { + VisibilityClient platform.VisibilityClient + + ProxyPrefix string +} + +// 
OnCreate creates visibilities from the specified notification payload by invoking the proper platform clients +func (vnh *VisibilityResourceNotificationsHandler) OnCreate(ctx context.Context, payload json.RawMessage) { + if vnh.VisibilityClient == nil { + log.C(ctx).Warn("Platform client cannot handle visibilities. Visibility notification will be skipped") + return + } + + log.C(ctx).Debugf("Processing visibility create notification with payload %s...", string(payload)) + + visPayload := visibilityPayload{} + if err := json.Unmarshal(payload, &visPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling visibility create notification payload") + return + } + + if err := visPayload.Validate(types.CREATED); err != nil { + log.C(ctx).WithError(err).Error("error validating visibility payload") + return + } + + v := visPayload.New + platformBrokerName := vnh.ProxyPrefix + v.Additional.BrokerName + + log.C(ctx).Infof("Attempting to enable access for plan with catalog ID %s for platform broker with name %s and labels %v...", v.Additional.ServicePlan.CatalogID, platformBrokerName, v.Resource.GetLabels()) + + if err := vnh.VisibilityClient.EnableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: v.Additional.ServicePlan.CatalogID, + Labels: v.Resource.GetLabels(), + }); err != nil { + log.C(ctx).WithError(err).Errorf("error enabling access for plan %s in broker with name %s", v.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully enabled access for plan with catalog ID %s for platform broker with name %s and labels %v...", v.Additional.ServicePlan.CatalogID, platformBrokerName, v.Resource.GetLabels()) +} + +// OnUpdate modifies visibilities from the specified notification payload by invoking the proper platform clients +func (vnh *VisibilityResourceNotificationsHandler) OnUpdate(ctx context.Context, payload json.RawMessage) { + if vnh.VisibilityClient == nil { + log.C(ctx).Warn("Platform client cannot handle visibilities. Visibility notification will be skipped.") + return + } + + log.C(ctx).Debugf("Processing visibility update notification with payload %s...", string(payload)) + + visibilityPayload := visibilityPayload{} + if err := json.Unmarshal(payload, &visibilityPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling visibility create notification payload") + return + } + + if err := visibilityPayload.Validate(types.MODIFIED); err != nil { + log.C(ctx).WithError(err).Error("error validating visibility payload") + return + } + + oldVisibilityPayload := visibilityPayload.Old + newVisibilityPayload := visibilityPayload.New + + platformBrokerName := vnh.ProxyPrefix + oldVisibilityPayload.Additional.BrokerName + + labelsToAdd, labelsToRemove := LabelChangesToLabels(visibilityPayload.LabelChanges) + + if oldVisibilityPayload.Additional.ServicePlan.CatalogID != newVisibilityPayload.Additional.ServicePlan.CatalogID { + log.C(ctx).Infof("The catalog plan ID has been modified. 
Attempting to disable access for plan with catalog ID %s for platform broker with name %s and labels %v...", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, oldVisibilityPayload.Resource.GetLabels()) + + if err := vnh.VisibilityClient.DisableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: oldVisibilityPayload.Additional.ServicePlan.CatalogID, + Labels: oldVisibilityPayload.Resource.GetLabels(), + }); err != nil { + log.C(ctx).WithError(err).Errorf("error disabling access for plan %s in broker with name %s", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully disabled access for plan with catalog ID %s for platform broker with name %s and labels %v...", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, oldVisibilityPayload.Resource.GetLabels()) + + log.C(ctx).Infof("The catalog plan ID has been modified. Attempting to enable access for plan with catalog ID %s for platform broker with name %s and labels %v...", newVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, newVisibilityPayload.Resource.GetLabels()) + + if err := vnh.VisibilityClient.EnableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: newVisibilityPayload.Additional.ServicePlan.CatalogID, + Labels: newVisibilityPayload.Resource.GetLabels(), + }); err != nil { + log.C(ctx).WithError(err).Errorf("error enabling access for plan %s in broker with name %s", newVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully enabled access for plan with catalog ID %s for platform broker with name %s and labels %v...", newVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, newVisibilityPayload.Resource.GetLabels()) + } + + log.C(ctx).Infof("Attempting to enable access for plan with catalog ID %s for platform broker with name %s and labels %v...", newVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, labelsToAdd) + + if err := vnh.VisibilityClient.EnableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: newVisibilityPayload.Additional.ServicePlan.CatalogID, + Labels: labelsToAdd, + }); err != nil { + log.C(ctx).WithError(err).Errorf("error enabling access for plan %s in broker with name %s", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully enabled access for plan with catalog ID %s for platform broker with name %s and labels %v...", newVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, labelsToAdd) + + log.C(ctx).Infof("Attempting to disable access for plan with catalog ID %s for platform broker with name %s and labels %v...", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, labelsToRemove) + + if err := vnh.VisibilityClient.DisableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: newVisibilityPayload.Additional.ServicePlan.CatalogID, + Labels: labelsToRemove, + }); err != nil { + log.C(ctx).WithError(err).Errorf("error disabling access for plan %s in broker with name %s", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully disabled access for plan with catalog ID %s for platform broker with name %s and 
labels %v...", oldVisibilityPayload.Additional.ServicePlan.CatalogID, platformBrokerName, labelsToRemove) + +} + +// OnDelete deletes visibilities from the provided notification payload by invoking the proper platform clients +func (vnh *VisibilityResourceNotificationsHandler) OnDelete(ctx context.Context, payload json.RawMessage) { + if vnh.VisibilityClient == nil { + log.C(ctx).Warn("Platform client cannot handle visibilities. Visibility notification will be skipped") + return + } + + log.C(ctx).Debugf("Processing visibility delete notification with payload %s...", string(payload)) + + visibilityPayload := visibilityPayload{} + if err := json.Unmarshal(payload, &visibilityPayload); err != nil { + log.C(ctx).WithError(err).Error("error unmarshaling visibility delete notification payload") + return + } + + if err := visibilityPayload.Validate(types.DELETED); err != nil { + log.C(ctx).WithError(err).Error("error validating visibility payload") + return + } + + v := visibilityPayload.Old + platformBrokerName := vnh.ProxyPrefix + v.Additional.BrokerName + + log.C(ctx).Infof("Attempting to disable access for plan with catalog ID %s for platform broker with name %s and labels %v...", v.Additional.ServicePlan.CatalogID, platformBrokerName, v.Resource.GetLabels()) + + if err := vnh.VisibilityClient.DisableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: platformBrokerName, + CatalogPlanID: v.Additional.ServicePlan.CatalogID, + Labels: v.Resource.GetLabels(), + }); err != nil { + log.C(ctx).WithError(err).Errorf("error disabling access for plan %s in broker with name %s", v.Additional.ServicePlan.CatalogID, platformBrokerName) + return + } + log.C(ctx).Infof("Successfully disabled access for plan with catalog ID %s for platform broker with name %s and labels %v...", v.Additional.ServicePlan.CatalogID, platformBrokerName, v.Resource.GetLabels()) + +} diff --git a/pkg/sbproxy/notifications/handlers/visibility_handler_test.go b/pkg/sbproxy/notifications/handlers/visibility_handler_test.go new file mode 100644 index 00000000..a98657ae --- /dev/null +++ b/pkg/sbproxy/notifications/handlers/visibility_handler_test.go @@ -0,0 +1,610 @@ +package handlers_test + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/tidwall/gjson" + + "github.com/tidwall/sjson" + + "github.com/Peripli/service-manager/pkg/query" + + "github.com/Peripli/service-manager/pkg/types" + + "github.com/Peripli/service-broker-proxy/pkg/platform" + . "github.com/onsi/gomega" + + "github.com/Peripli/service-broker-proxy/pkg/platform/platformfakes" + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications/handlers" + . 
"github.com/onsi/ginkgo" +) + +var _ = Describe("Visibility Handler", func() { + var ctx context.Context + + var fakeVisibilityClient *platformfakes.FakeVisibilityClient + + var visibilityHandler *handlers.VisibilityResourceNotificationsHandler + + var visibilityNotificationPayload string + + var labels string + var catalogPlan string + var catalogPlanID string + var anotherCatalogPlanID string + var smBrokerID string + var smBrokerName string + + var err error + + unmarshalLabels := func(labelsJSON string) types.Labels { + labels := types.Labels{} + err := json.Unmarshal([]byte(labelsJSON), &labels) + Expect(err).ShouldNot(HaveOccurred()) + + return labels + } + + unmarshalLabelChanges := func(labelChangeJSON string) query.LabelChanges { + labelChanges := query.LabelChanges{} + err := json.Unmarshal([]byte(labelChangeJSON), &labelChanges) + Expect(err).ShouldNot(HaveOccurred()) + + return labelChanges + } + + BeforeEach(func() { + ctx = context.TODO() + + smBrokerID = "brokerID" + smBrokerName = "brokerName" + + labels = ` + { + "key1": ["value1", "value2"], + "key2": ["value3", "value4"] + }` + catalogPlan = ` + { + "name": "another-plan-name-123", + "id": "another-plan-id-123", + "catalog_id": "catalog_id-123", + "catalog_name": "catalog_name-123", + "service_offering_id": "so-id-123", + "description": "test-description", + "free": true, + "metadata": { + "max_storage_tb": 5, + "costs":[ + { + "amount":{ + "usd":199.0 + }, + "unit":"MONTHLY" + }, + { + "amount":{ + "usd":0.99 + }, + "unit":"1GB of messages over 20GB" + } + ], + "bullets": [ + "40 concurrent connections" + ] + } + }` + + catalogPlanID = gjson.Get(catalogPlan, "catalog_id").Str + anotherCatalogPlanID = "anotherCatalogPlanID" + + fakeVisibilityClient = &platformfakes.FakeVisibilityClient{} + + visibilityHandler = &handlers.VisibilityResourceNotificationsHandler{ + VisibilityClient: fakeVisibilityClient, + } + }) + + Describe("OnCreate", func() { + BeforeEach(func() { + visibilityNotificationPayload = fmt.Sprintf(` + { + "new": { + "resource": { + "id": "visID", + "platform_id": "smPlatformID", + "service_plan_id": "smServicePlanID", + "labels": %s + }, + "additional": { + "broker_id": "%s", + "broker_name": "%s", + "service_plan": %s + } + } + }`, labels, smBrokerID, smBrokerName, catalogPlan) + }) + + Context("when visibility client is nil", func() { + It("does not try to enable or disable access", func() { + h := &handlers.VisibilityResourceNotificationsHandler{} + h.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + visibilityNotificationPayload = `randomString` + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when notification payload is invalid", func() { + Context("when new resource is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload = `{"randomKey":"randomValue"}` + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + 
Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the visibility ID is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new.resource.id") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the visibility plan ID is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new.resource.service_plan_id") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the catalog plan is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new.additional.service_plan") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + }) + + Context("when the broker ID is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new.additional.broker_id") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the broker name is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new.additional.broker_name") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the notification payload is valid", func() { + Context("when an error occurs while enabling access", func() { + BeforeEach(func() { + fakeVisibilityClient.EnableAccessForPlanReturns(fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedRequest *platform.ModifyPlanAccessRequest + + BeforeEach(func() { + fakeVisibilityClient.EnableAccessForPlanReturns(nil) + + expectedRequest = &platform.ModifyPlanAccessRequest{ + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + 
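// The expected BrokerName is the proxified name (ProxyPrefix + the SM broker's name),
// mirroring how the visibility handler resolves the platform broker it targets.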
CatalogPlanID: catalogPlanID, + Labels: unmarshalLabels(labels), + } + }) + + It("invokes enable access for plan", func() { + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + visibilityHandler.OnCreate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(1)) + + callCtx, callRequest := fakeVisibilityClient.EnableAccessForPlanArgsForCall(0) + + Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedRequest)) + }) + }) + }) + }) + + Describe("OnUpdate", func() { + var addLabelChanges string + var removeLabelChanges string + var labelChanges string + + BeforeEach(func() { + addLabelChanges = ` + { + "op": "add", + "key": "key3", + "values": ["value5", "value6"] + }` + + removeLabelChanges = ` + { + "op": "remove", + "key": "key2", + "values": ["value3", "value4"] + }` + + labelChanges = fmt.Sprintf(` + [%s, %s]`, addLabelChanges, removeLabelChanges) + + visibilityNotificationPayload = fmt.Sprintf(` + { + "old": { + "resource": { + "id": "visID", + "platform_id": "smPlatformID", + "service_plan_id": "smServicePlanID", + "labels": %s + }, + "additional": { + "broker_id": "%s", + "broker_name": "%s", + "service_plan": %s + } + }, + "new": { + "resource": { + "id": "visID", + "platform_id": "smPlatformID", + "service_plan_id": "smServicePlanID", + "labels": %s + }, + "additional": { + "broker_id": "%s", + "broker_name": "%s", + "service_plan": %s + } + }, + "label_changes": %s + }`, labels, smBrokerID, smBrokerName, catalogPlan, labels, smBrokerID, smBrokerName, catalogPlan, labelChanges) + }) + + Context("when visibility client is nil", func() { + It("does not try to enable or disable access", func() { + h := &handlers.VisibilityResourceNotificationsHandler{} + h.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + visibilityNotificationPayload = `randomString` + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when old resource is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "old") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when new resource is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "new") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when the notification payload is 
valid", func() { + Context("when an error occurs while enabling access", func() { + BeforeEach(func() { + fakeVisibilityClient.EnableAccessForPlanReturns(fmt.Errorf("error")) + fakeVisibilityClient.DisableAccessForPlanReturns(nil) + + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + }) + }) + }) + + Context("when an error occurs while disabling access", func() { + BeforeEach(func() { + fakeVisibilityClient.EnableAccessForPlanReturns(nil) + fakeVisibilityClient.DisableAccessForPlanReturns(fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedEnableAccessRequests []*platform.ModifyPlanAccessRequest + var expectedDisableAccessRequests []*platform.ModifyPlanAccessRequest + var labelsToAdd types.Labels + var labelsToRemove types.Labels + + verifyInvocations := func(enableAccessCount, disableAccessCount int) { + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + + visibilityHandler.OnUpdate(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(enableAccessCount)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(disableAccessCount)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(len(expectedEnableAccessRequests))) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(len(expectedDisableAccessRequests))) + + for i, expectedRequest := range expectedEnableAccessRequests { + callCtx, enableAccessRequest := fakeVisibilityClient.EnableAccessForPlanArgsForCall(i) + Expect(callCtx).To(Equal(ctx)) + Expect(enableAccessRequest).To(Equal(expectedRequest)) + } + + for i, expectedRequest := range expectedDisableAccessRequests { + callCtx, enableAccessRequest := fakeVisibilityClient.DisableAccessForPlanArgsForCall(i) + Expect(callCtx).To(Equal(ctx)) + Expect(enableAccessRequest).To(Equal(expectedRequest)) + } + } + + BeforeEach(func() { + fakeVisibilityClient.EnableAccessForPlanReturns(nil) + fakeVisibilityClient.DisableAccessForPlanReturns(nil) + + labelsToAdd, labelsToRemove = handlers.LabelChangesToLabels(unmarshalLabelChanges(labelChanges)) + expectedEnableAccessRequests = []*platform.ModifyPlanAccessRequest{ + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: catalogPlanID, + Labels: labelsToAdd, + }, + } + + expectedDisableAccessRequests = []*platform.ModifyPlanAccessRequest{ + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: catalogPlanID, + Labels: labelsToRemove, + }, + } + }) + + It("invokes enable and disable access for the plan", func() { + verifyInvocations(1, 1) + }) + + Context("when the catalog plan ID is modified", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Set(visibilityNotificationPayload, "new.additional.service_plan.catalog_id", anotherCatalogPlanID) + Expect(err).ShouldNot(HaveOccurred()) + + expectedEnableAccessRequests = []*platform.ModifyPlanAccessRequest{ + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: anotherCatalogPlanID, + Labels: unmarshalLabels(labels), + }, + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: 
anotherCatalogPlanID, + Labels: labelsToAdd, + }, + } + + expectedDisableAccessRequests = []*platform.ModifyPlanAccessRequest{ + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: catalogPlanID, + Labels: unmarshalLabels(labels), + }, + { + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: anotherCatalogPlanID, + Labels: labelsToRemove, + }, + } + }) + + It("invokes enable access for the new catalog plan id and disable access for the old catalog plan id", func() { + verifyInvocations(2, 2) + }) + }) + }) + }) + }) + + Describe("OnDelete", func() { + BeforeEach(func() { + visibilityNotificationPayload = fmt.Sprintf(` + { + "old": { + "resource": { + "id": "visID", + "platform_id": "smPlatformID", + "service_plan_id": "smServicePlanID", + "labels": %s + }, + "additional": { + "broker_id": "%s", + "broker_name": "%s", + "service_plan": %s + } + } + }`, labels, smBrokerID, smBrokerName, catalogPlan) + }) + + Context("when visibility client is nil", func() { + It("does not try to enable or disable access", func() { + h := &handlers.VisibilityResourceNotificationsHandler{} + h.OnDelete(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when unmarshaling notification payload fails", func() { + BeforeEach(func() { + visibilityNotificationPayload = `randomString` + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnDelete(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + + Context("when notification payload is invalid", func() { + Context("when old resource is missing", func() { + BeforeEach(func() { + visibilityNotificationPayload, err = sjson.Delete(visibilityNotificationPayload, "old") + Expect(err).ShouldNot(HaveOccurred()) + }) + + It("does not try to enable or disable access", func() { + visibilityHandler.OnDelete(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(0)) + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + }) + }) + }) + + Context("when the notification payload is valid", func() { + Context("when an error occurs while disabling access", func() { + BeforeEach(func() { + fakeVisibilityClient.DisableAccessForPlanReturns(fmt.Errorf("error")) + }) + + It("logs an error", func() { + VerifyErrorLogged(func() { + visibilityHandler.OnDelete(ctx, json.RawMessage(visibilityNotificationPayload)) + }) + }) + }) + + Context("when no error occurs", func() { + var expectedRequest *platform.ModifyPlanAccessRequest + + BeforeEach(func() { + fakeVisibilityClient.DisableAccessForPlanReturns(nil) + + expectedRequest = &platform.ModifyPlanAccessRequest{ + BrokerName: visibilityHandler.ProxyPrefix + smBrokerName, + CatalogPlanID: catalogPlanID, + Labels: unmarshalLabels(labels), + } + }) + + It("invokes disable access for plan", func() { + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(0)) + visibilityHandler.OnDelete(ctx, json.RawMessage(visibilityNotificationPayload)) + + Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(1)) + + callCtx, callRequest := fakeVisibilityClient.DisableAccessForPlanArgsForCall(0) + + 
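// The captured request is expected to carry the proxified broker name and the labels of
// the old (deleted) visibility resource, since OnDelete builds its request from the "old"
// part of the notification payload.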
Expect(callCtx).To(Equal(ctx)) + Expect(callRequest).To(Equal(expectedRequest)) + }) + }) + + }) + }) +}) diff --git a/pkg/sbproxy/notifications/producer.go b/pkg/sbproxy/notifications/producer.go new file mode 100644 index 00000000..fe0dce31 --- /dev/null +++ b/pkg/sbproxy/notifications/producer.go @@ -0,0 +1,355 @@ +/* + * Copyright 2018 The Service Manager Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package notifications + +import ( + "context" + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/url" + "path" + "strconv" + "sync" + "time" + + "github.com/Peripli/service-manager/api/notifications" + "github.com/pkg/errors" + + "github.com/Peripli/service-broker-proxy/pkg/sm" + "github.com/Peripli/service-manager/pkg/log" + "github.com/Peripli/service-manager/pkg/types" + "github.com/gorilla/websocket" +) + +var errLastNotificationGone = errors.New("last notification revision no longer present in SM") + +const unknownRevision int64 = -1 + +// Producer reads notifications coming from the websocket connection and regularly +// triggers full resync +type Producer struct { + producerSettings ProducerSettings + smSettings sm.Settings + + lastNotificationRevision int64 + conn *websocket.Conn + pingPeriod time.Duration + readTimeout time.Duration + url *url.URL +} + +// ProducerSettings are the settings for the producer +type ProducerSettings struct { + // MinPingPeriod is the minimum allowed ping period + MinPingPeriod time.Duration `mapstructure:"min_ping_period"` + // ReconnectDelay is the time to wait between reconnection attempts + ReconnectDelay time.Duration `mapstructure:"reconnect_delay"` + // PongTimeout is the maximum time to wait between a ping and a pong + PongTimeout time.Duration `mapstructure:"pong_timeout"` + // PingPeriodPercentage is the percentage of actual ping period compared to the max_ping_period returned by SM + PingPeriodPercentage int64 `mapstructure:"ping_period_percentage"` + // MessagesQueueSize is the size of the messages queue + MessagesQueueSize int `mapstructure:"messages_queue_size"` + // ResyncPeriod is the time between two resyncs + ResyncPeriod time.Duration `mapstructure:"resync_period"` +} + +// Validate validates the producer settings +func (p ProducerSettings) Validate() error { + if p.MinPingPeriod <= 0 { + return fmt.Errorf("ProducerSettings: min ping period must be a positive duration") + } + if p.ReconnectDelay < 0 { + return fmt.Errorf("ProducerSettings: reconnect delay must be a non-negative duration") + } + if p.PongTimeout <= 0 { + return fmt.Errorf("ProducerSettings: pong timeout must be a positive duration") + } + if p.PingPeriodPercentage <= 0 || p.PingPeriodPercentage >= 100 { + return fmt.Errorf("ProducerSettings: ping period percentage must be between 0 and 100 (exclusive)") + } + if p.ResyncPeriod <= 0 { + return fmt.Errorf("ProducerSettings: resync period must be a positive duration") + } + return nil +} + +// DefaultProducerSettings are the default settings for the producer +func DefaultProducerSettings() 
*ProducerSettings { + return &ProducerSettings{ + MinPingPeriod: time.Second, + ReconnectDelay: 3 * time.Second, + PongTimeout: 2 * time.Second, + PingPeriodPercentage: 60, + MessagesQueueSize: 1024, + ResyncPeriod: 12 * time.Hour, + } +} + +// Message is the payload sent by the producer +type Message struct { + // Notification is the notification that needs to be applied on the platform + Notification *types.Notification + + // Resync indicates that full resync is needed + // In this case Notification is nil + // Resync messages are written in the channel: + // - The first message in the channel, so we always start with a resync + // - When notifications are lost (indicated by status 410 returned by SM) + // - On regular interval based on configuration + Resync bool +} + +// NewProducer returns a configured producer for the given settings +func NewProducer(producerSettings *ProducerSettings, smSettings *sm.Settings) (*Producer, error) { + notificationsURL, err := buildNotificationsURL(smSettings.URL, smSettings.NotificationsAPIPath) + if err != nil { + return nil, err + } + return &Producer{ + url: notificationsURL, + producerSettings: *producerSettings, + smSettings: *smSettings, + }, nil +} + +func buildNotificationsURL(baseURL, notificationsPath string) (*url.URL, error) { + smURL, err := url.Parse(baseURL) + if err != nil { + return nil, err + } + switch smURL.Scheme { + case "http": + smURL.Scheme = "ws" + case "https": + smURL.Scheme = "wss" + } + smURL.Path = path.Join(smURL.Path, notificationsPath) + return smURL, nil +} + +// Start starts the producer in a new go-routine +func (p *Producer) Start(ctx context.Context, group *sync.WaitGroup) <-chan *Message { + group.Add(1) + messages := make(chan *Message, p.producerSettings.MessagesQueueSize) + go p.run(ctx, messages, group) + return messages +} + +func (p *Producer) run(ctx context.Context, messages chan *Message, group *sync.WaitGroup) { + defer group.Done() + resyncChan := make(chan struct{}) + p.lastNotificationRevision = unknownRevision + go p.scheduleResync(ctx, resyncChan, messages) + for { + needResync := p.lastNotificationRevision == unknownRevision + if err := p.connect(ctx); err != nil { + log.C(ctx).WithError(err).Error("could not connect websocket") + if err == errLastNotificationGone { // skip reconnect delay + p.lastNotificationRevision = unknownRevision + continue + } + } else { + if needResync { + log.C(ctx).Info("Last notification revision is gone. Triggering resync") + resyncChan <- struct{}{} + } + p.conn.SetPongHandler(func(string) error { + log.C(ctx).Debug("Received pong") + return p.conn.SetReadDeadline(time.Now().Add(p.readTimeout)) + }) + + done := make(chan struct{}, 1) + childContext, stopChildren := context.WithCancel(ctx) + go p.readNotifications(childContext, messages, done) + go p.ping(childContext, done) + + <-done // wait for at least one child goroutine (reader/writer) to exit + stopChildren() + p.closeConnection(ctx) + } + select { + case <-ctx.Done(): + log.C(ctx).Info("Context cancelled. Terminating notifications handler") + return + case <-time.After(p.producerSettings.ReconnectDelay): + log.C(ctx).Debug("Attempting to reestablish websocket connection") + } + } +} + +func (p *Producer) scheduleResync(ctx context.Context, resyncChan chan struct{}, messages chan *Message) { + timer := time.NewTimer(p.producerSettings.ResyncPeriod) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + log.C(ctx).Info("Context cancelled while waiting. 
Terminating resync timer.") + return + case <-resyncChan: + case <-timer.C: + log.C(ctx).Debug("Resync period has elapsed. Triggering resync.") + } + select { + case <-ctx.Done(): + log.C(ctx).Info("Context cancelled while sending message. Terminating resync timer.") + return + case messages <- &Message{Resync: true}: + log.C(ctx).Debugf("Sending resync message. Channel len/cap: %d/%d", len(messages), cap(messages)) + } + timer.Stop() + timer = time.NewTimer(p.producerSettings.ResyncPeriod) + } +} + +func (p *Producer) readNotifications(ctx context.Context, messages chan *Message, done chan<- struct{}) { + defer func() { + log.C(ctx).Debug("Exiting notification reader") + done <- struct{}{} + }() + log.C(ctx).Debug("Starting notification reader") + for { + if err := p.conn.SetReadDeadline(time.Now().Add(p.readTimeout)); err != nil { + log.C(ctx).WithError(err).Error("Error setting read timeout on websocket") + return + } + _, bytes, err := p.conn.ReadMessage() + if err != nil { + log.C(ctx).WithError(err).Error("Error reading from websocket") + return + } + var notification types.Notification + if err = json.Unmarshal(bytes, ¬ification); err != nil { + log.C(ctx).WithError(err).Error("Could not unmarshal WS message into a notification") + return + } + log.C(ctx).Debugf("Received notification with revision %d", notification.Revision) + select { + case <-ctx.Done(): + log.C(ctx).Info("Context cancelled while sending message. Terminating notification reader.") + return + case messages <- &Message{Notification: ¬ification}: + log.C(ctx).Debugf("Notification written in channel. Channel len/cap: %d/%d", len(messages), cap(messages)) + } + + p.lastNotificationRevision = notification.Revision + } +} + +func (p *Producer) ping(ctx context.Context, done chan<- struct{}) { + defer func() { + log.C(ctx).Debug("Exiting pinger") + done <- struct{}{} + }() + log.C(ctx).Debug("Starting pinger") + ticker := time.NewTicker(p.pingPeriod) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + log.C(ctx).Debug("Sending ping") + if err := p.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { + log.C(ctx).WithError(err).Error("Could not write message on the websocket") + return + } + } + } +} + +func (p *Producer) connect(ctx context.Context) error { + headers := http.Header{} + auth := "Basic " + base64.StdEncoding.EncodeToString([]byte(p.smSettings.User+":"+p.smSettings.Password)) + headers.Add("Authorization", auth) + + connectURL := *p.url + if p.lastNotificationRevision != unknownRevision { + q := connectURL.Query() + q.Set(notifications.LastKnownRevisionQueryParam, strconv.FormatInt(p.lastNotificationRevision, 10)) + connectURL.RawQuery = q.Encode() + } + dialer := &websocket.Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: p.smSettings.RequestTimeout, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: p.smSettings.SkipSSLValidation, + }, + } + log.C(ctx).Debugf("Connecting to %s ...", &connectURL) + var err error + var resp *http.Response + p.conn, resp, err = dialer.DialContext(ctx, connectURL.String(), headers) + if err != nil { + if resp != nil { + log.C(ctx).WithError(err).Errorf("Could not connect to %s: status: %d", &connectURL, resp.StatusCode) + if resp.StatusCode == http.StatusGone { + return errLastNotificationGone + } + } + return err + } + if err = p.readResponseHeaders(ctx, resp.Header); err != nil { + p.closeConnection(ctx) + return err + } + + return nil +} + +func (p *Producer) readResponseHeaders(ctx context.Context, 
header http.Header) error { + if p.lastNotificationRevision == unknownRevision { + rev := header.Get(notifications.LastKnownRevisionHeader) + revision, err := strconv.ParseInt(rev, 10, 64) + if err != nil { + return fmt.Errorf("invalid last notification revision received (%s): %v", rev, err) + } + if revision < 0 { + return fmt.Errorf("invalid last notification revision received (%d)", revision) + } + p.lastNotificationRevision = revision + } + + maxPingPeriod, err := time.ParseDuration(header.Get(notifications.MaxPingPeriodHeader)) + if err != nil { + return errors.Wrap(err, "invalid max ping period received") + } + if maxPingPeriod < p.producerSettings.MinPingPeriod { + return fmt.Errorf("invalid max ping period (%s) must be greater than the minimum ping period (%s)", maxPingPeriod, p.producerSettings.MinPingPeriod) + } + p.pingPeriod = time.Duration(int64(maxPingPeriod) * p.producerSettings.PingPeriodPercentage / 100) + p.readTimeout = p.pingPeriod + p.producerSettings.PongTimeout // should be longer than pingPeriod + log.C(ctx).Infof("Connected! Ping period: %s pong timeout: %s", p.pingPeriod, p.readTimeout) + return nil +} + +func (p *Producer) closeConnection(ctx context.Context) { + if p.conn == nil { + return + } + log.C(ctx).Debug("Closing websocket connection") + if err := p.conn.WriteControl(websocket.CloseMessage, []byte{}, time.Now().Add(p.smSettings.RequestTimeout)); err != nil { + log.C(ctx).WithError(err).Warn("Could not send close message on websocket") + } + if err := p.conn.Close(); err != nil { + log.C(ctx).WithError(err).Warn("Could not close websocket connection") + } +} diff --git a/pkg/sbproxy/notifications/producer_test.go b/pkg/sbproxy/notifications/producer_test.go new file mode 100644 index 00000000..bb640531 --- /dev/null +++ b/pkg/sbproxy/notifications/producer_test.go @@ -0,0 +1,567 @@ +package notifications_test + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "sync" + "testing" + "time" + + smnotifications "github.com/Peripli/service-manager/api/notifications" + + "github.com/Peripli/service-manager/pkg/types" + "github.com/sirupsen/logrus" + + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications" + "github.com/Peripli/service-broker-proxy/pkg/sm" + "github.com/Peripli/service-manager/pkg/log" + "github.com/gorilla/websocket" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +func TestNotifications(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Notifications Suite") +} + +var _ = Describe("Notifications", func() { + Describe("Producer Settings", func() { + var settings *notifications.ProducerSettings + BeforeEach(func() { + settings = notifications.DefaultProducerSettings() + }) + It("Default settings are valid", func() { + err := settings.Validate() + Expect(err).ToNot(HaveOccurred()) + }) + + Context("When MinPingPeriod is invalid", func() { + It("Validate returns error", func() { + settings.MinPingPeriod = 0 + err := settings.Validate() + Expect(err).To(HaveOccurred()) + }) + }) + + Context("When ReconnectDelay is invalid", func() { + It("Validate returns error", func() { + settings.ReconnectDelay = -1 * time.Second + err := settings.Validate() + Expect(err).To(HaveOccurred()) + }) + }) + Context("When PongTimeout is invalid", func() { + It("Validate returns error", func() { + settings.PongTimeout = 0 + err := settings.Validate() + Expect(err).To(HaveOccurred()) + }) + }) + Context("When PingPeriodPercentage is invalid", func() { + It("Validate returns error", func() { + settings.PingPeriodPercentage = 0 + err := settings.Validate() + Expect(err).To(HaveOccurred()) + + settings.PingPeriodPercentage = 100 + err = settings.Validate() + Expect(err).To(HaveOccurred()) + }) + }) + + Context("when resync period is missing", func() { + It("returns an error", func() { + settings.ResyncPeriod = 0 + err := settings.Validate() + Expect(err).To(HaveOccurred()) + }) + }) + }) + + Describe("Producer", func() { + var ctx context.Context + var cancelFunc func() + var logInterceptor *logWriter + + var producerSettings *notifications.ProducerSettings + var smSettings *sm.Settings + var producer *notifications.Producer + var producerCtx context.Context + + var server *wsServer + var group *sync.WaitGroup + + BeforeEach(func() { + group = &sync.WaitGroup{} + ctx, cancelFunc = context.WithCancel(context.Background()) + logInterceptor = &logWriter{} + logInterceptor.Reset() + producerCtx = log.Configure(ctx, &log.Settings{ + Level: "debug", + Format: "text", + Output: GinkgoWriter, + }) + log.AddHook(logInterceptor) + server = newWSServer() + server.Start() + smSettings = &sm.Settings{ + URL: server.url, + User: "admin", + Password: "admin", + RequestTimeout: 2 * time.Second, + NotificationsAPIPath: "/v1/notifications", + } + producerSettings = ¬ifications.ProducerSettings{ + MinPingPeriod: 100 * time.Millisecond, + ReconnectDelay: 100 * time.Millisecond, + PongTimeout: 20 * time.Millisecond, + ResyncPeriod: 300 * time.Millisecond, + PingPeriodPercentage: 60, + MessagesQueueSize: 10, + } + }) + + JustBeforeEach(func() { + var err error + producer, err = notifications.NewProducer(producerSettings, smSettings) + Expect(err).ToNot(HaveOccurred()) + }) + + notification := &types.Notification{ + Revision: 123, + Payload: json.RawMessage("{}"), + } + message := ¬ifications.Message{Notification: notification} + + AfterEach(func() { + cancelFunc() + server.Close() + }) + + Context("During websocket connect", func() { + It("Sends correct basic credentials", func(done Done) { + server.onRequest = func(r *http.Request) { + defer GinkgoRecover() + username, password, ok := r.BasicAuth() + Expect(ok).To(BeTrue()) + Expect(username).To(Equal(smSettings.User)) + Expect(password).To(Equal(smSettings.Password)) + close(done) + } + producer.Start(producerCtx, group) + }) + }) + + Context("When last notification revision is not found", func() { + 
BeforeEach(func() { + requestCnt := 0 + server.onRequest = func(r *http.Request) { + requestCnt++ + if requestCnt == 1 { + server.statusCode = http.StatusGone + } else { + server.statusCode = 0 + } + } + }) + + It("Sends restart message", func() { + Eventually(producer.Start(producerCtx, group)).Should(Receive(Equal(¬ifications.Message{Resync: true}))) + }) + }) + + Context("When notifications is sent by the server", func() { + BeforeEach(func() { + server.onClientConnected = func(conn *websocket.Conn) { + defer GinkgoRecover() + err := conn.WriteJSON(notification) + Expect(err).ToNot(HaveOccurred()) + } + }) + + It("forwards the notification on the channel", func() { + Eventually(producer.Start(producerCtx, group)).Should(Receive(Equal(message))) + }) + }) + + Context("When connection is closed", func() { + It("reconnects with last known notification revision", func(done Done) { + requestCount := 0 + server.onRequest = func(r *http.Request) { + defer GinkgoRecover() + requestCount++ + if requestCount > 1 { + rev := r.URL.Query().Get(smnotifications.LastKnownRevisionQueryParam) + Expect(rev).To(Equal(strconv.FormatInt(notification.Revision, 10))) + close(done) + } + } + once := &sync.Once{} + server.onClientConnected = func(conn *websocket.Conn) { + once.Do(func() { + defer GinkgoRecover() + err := conn.WriteJSON(notification) + Expect(err).ToNot(HaveOccurred()) + conn.Close() + }) + } + messages := producer.Start(producerCtx, group) + Eventually(messages).Should(Receive(Equal(message))) + }) + }) + + Context("When websocket is connected", func() { + It("Pings the servers within max_ping_period", func(done Done) { + times := make(chan time.Time, 10) + server.onClientConnected = func(conn *websocket.Conn) { + times <- time.Now() + } + server.pingHandler = func(string) error { + defer GinkgoRecover() + now := time.Now() + times <- now + if len(times) == 3 { + start := <-times + for i := 0; i < 2; i++ { + t := <-times + pingPeriod, err := time.ParseDuration(server.maxPingPeriod) + Expect(err).ToNot(HaveOccurred()) + Expect(t.Sub(start)).To(BeNumerically("<", pingPeriod)) + start = t + } + close(done) + } + server.conn.WriteControl(websocket.PongMessage, []byte{}, now.Add(1*time.Second)) + return nil + } + producer.Start(producerCtx, group) + }) + }) + + Context("When invalid last notification revision is sent", func() { + BeforeEach(func() { + server.lastNotificationRevision = "-1" + }) + It("returns error", func() { + producer.Start(producerCtx, group) + Eventually(logInterceptor.String).Should(ContainSubstring("invalid last notification revision")) + }) + }) + + Context("When server does not return pong within the timeout", func() { + It("Reconnects", func(done Done) { + var initialPingTime time.Time + var timeBetweenReconnection time.Duration + server.pingHandler = func(s string) error { + if initialPingTime.IsZero() { + initialPingTime = time.Now() + } + return nil + } + server.onClientConnected = func(conn *websocket.Conn) { + if !initialPingTime.IsZero() { + defer GinkgoRecover() + timeBetweenReconnection = time.Now().Sub(initialPingTime) + pingPeriod, err := time.ParseDuration(server.maxPingPeriod) + Expect(err).ToNot(HaveOccurred()) + Expect(timeBetweenReconnection).To(BeNumerically("<", producerSettings.ReconnectDelay+pingPeriod)) + close(done) + } + } + producer.Start(producerCtx, group) + }) + }) + + Context("When notification is not a valid JSON", func() { + It("Logs error and reconnects", func(done Done) { + connectionAttempts := 0 + server.onClientConnected = func(conn 
*websocket.Conn) { + defer GinkgoRecover() + err := conn.WriteMessage(websocket.TextMessage, []byte("not-json")) + Expect(err).ToNot(HaveOccurred()) + connectionAttempts++ + if connectionAttempts == 2 { + Eventually(logInterceptor.String).Should(ContainSubstring("unmarshal")) + close(done) + } + } + producer.Start(producerCtx, group) + }) + }) + + assertLogContainsMessageOnReconnect := func(message string, done Done) { + connectionAttempts := 0 + server.onClientConnected = func(conn *websocket.Conn) { + defer GinkgoRecover() + connectionAttempts++ + if connectionAttempts == 2 { + Eventually(logInterceptor.String).Should(ContainSubstring(message)) + close(done) + } + } + producer.Start(producerCtx, group) + } + + Context("When last_notification_revision is not a number", func() { + BeforeEach(func() { + server.lastNotificationRevision = "not a number" + }) + It("Logs error and reconnects", func(done Done) { + assertLogContainsMessageOnReconnect(server.lastNotificationRevision, done) + }) + }) + + Context("When max_ping_period is not a number", func() { + BeforeEach(func() { + server.maxPingPeriod = "not a number" + }) + It("Logs error and reconnects", func(done Done) { + assertLogContainsMessageOnReconnect(server.maxPingPeriod, done) + }) + }) + + Context("When max_ping_period is less than the configured min ping period", func() { + BeforeEach(func() { + server.maxPingPeriod = (producerSettings.MinPingPeriod - 20*time.Millisecond).String() + }) + It("Logs error and reconnects", func(done Done) { + assertLogContainsMessageOnReconnect(server.maxPingPeriod, done) + }) + }) + + Context("When SM URL is not valid", func() { + It("Returns error", func() { + smSettings.URL = "::invalid-url" + newProducer, err := notifications.NewProducer(producerSettings, smSettings) + Expect(newProducer).To(BeNil()) + Expect(err).To(HaveOccurred()) + }) + }) + + Context("When SM returns error status", func() { + BeforeEach(func() { + server.statusCode = http.StatusInternalServerError + }) + It("Logs and reconnects", func(done Done) { + connectionAttempts := 0 + server.onRequest = func(r *http.Request) { + connectionAttempts++ + if connectionAttempts == 2 { + server.statusCode = 0 + } + } + server.onClientConnected = func(conn *websocket.Conn) { + Eventually(logInterceptor.String).Should(ContainSubstring("bad handshake")) + close(done) + } + producer.Start(producerCtx, group) + }) + }) + + Context("When cannot connect to given address", func() { + BeforeEach(func() { + smSettings.URL = "http://bad-host" + }) + It("Logs the error and tries to reconnect", func() { + var err error + producer, err = notifications.NewProducer(producerSettings, smSettings) + Expect(err).ToNot(HaveOccurred()) + producer.Start(producerCtx, group) + Eventually(logInterceptor.String).Should(ContainSubstring("no such host")) + Eventually(logInterceptor.String).Should(ContainSubstring("Attempting to reestablish websocket connection")) + }) + }) + + Context("When context is canceled", func() { + It("Releases the group", func() { + testCtx, cancel := context.WithCancel(context.Background()) + waitGroup := &sync.WaitGroup{} + producer.Start(testCtx, waitGroup) + cancel() + waitGroup.Wait() + }) + }) + + Context("When messages queue is full", func() { + BeforeEach(func() { + producerSettings.MessagesQueueSize = 2 + server.onClientConnected = func(conn *websocket.Conn) { + defer GinkgoRecover() + err := conn.WriteJSON(notification) + Expect(err).ToNot(HaveOccurred()) + err = conn.WriteJSON(notification) + Expect(err).ToNot(HaveOccurred()) + } + }) 
+ It("Canceling context stops the goroutines", func() { + producer.Start(producerCtx, group) + Eventually(logInterceptor.String).Should(ContainSubstring("Received notification ")) + cancelFunc() + Eventually(logInterceptor.String).Should(ContainSubstring("Exiting notification reader")) + }) + }) + + Context("When resync time elapses", func() { + BeforeEach(func() { + producerSettings.ResyncPeriod = 10 * time.Millisecond + }) + It("Sends a resync message", func(done Done) { + messages := producer.Start(producerCtx, group) + Expect(<-messages).To(Equal(¬ifications.Message{Resync: true})) // on initial connect + Expect(<-messages).To(Equal(¬ifications.Message{Resync: true})) // first time the timer ticks + Expect(<-messages).To(Equal(¬ifications.Message{Resync: true})) // second time the timer ticks + close(done) + }, (500 * time.Millisecond).Seconds()) + }) + + Context("When a force resync (410 GONE) is triggered", func() { + BeforeEach(func() { + producerSettings.ResyncPeriod = 200 * time.Millisecond + producerSettings.ReconnectDelay = 150 * time.Millisecond + + requestCnt := 0 + server.onRequest = func(r *http.Request) { + requestCnt++ + if requestCnt == 1 { + server.statusCode = 500 + } else { + server.statusCode = 0 + } + } + }) + It("Resets the resync timer period", func() { + messages := producer.Start(producerCtx, group) + start := time.Now() + m := <-messages + t := time.Now().Sub(start) + Expect(m).To(Equal(¬ifications.Message{Resync: true})) // on initial connect + Expect(t).To(BeNumerically(">=", producerSettings.ReconnectDelay)) + + m = <-messages + t = time.Now().Sub(start) + Expect(m).To(Equal(¬ifications.Message{Resync: true})) // first time the timer ticks + Expect(t).To(BeNumerically(">=", producerSettings.ResyncPeriod+producerSettings.ReconnectDelay)) + }) + }) + }) +}) + +type wsServer struct { + url string + server *httptest.Server + mux *http.ServeMux + conn *websocket.Conn + connMutex sync.Mutex + lastNotificationRevision string + maxPingPeriod string + statusCode int + onClientConnected func(*websocket.Conn) + onRequest func(r *http.Request) + pingHandler func(string) error +} + +func newWSServer() *wsServer { + s := &wsServer{ + maxPingPeriod: (100 * time.Millisecond).String(), + lastNotificationRevision: "0", + } + mux := http.NewServeMux() + mux.HandleFunc("/v1/notifications", s.handler) + s.mux = mux + return s +} + +func (s *wsServer) Start() { + s.server = httptest.NewServer(s.mux) + s.url = s.server.URL +} + +func (s *wsServer) Close() { + if s == nil { + return + } + if s.server != nil { + s.server.Close() + } + + s.connMutex.Lock() + defer s.connMutex.Unlock() + if s.conn != nil { + s.conn.Close() + } +} + +func (s *wsServer) handler(w http.ResponseWriter, r *http.Request) { + if s.onRequest != nil { + s.onRequest(r) + } + + var err error + upgrader := websocket.Upgrader{ + ReadBufferSize: 1024, + WriteBufferSize: 1024, + } + header := http.Header{} + header.Set(smnotifications.LastKnownRevisionHeader, s.lastNotificationRevision) + header.Set(smnotifications.MaxPingPeriodHeader, s.maxPingPeriod) + if s.statusCode != 0 { + w.WriteHeader(s.statusCode) + w.Write([]byte{}) + return + } + + s.connMutex.Lock() + defer s.connMutex.Unlock() + s.conn, err = upgrader.Upgrade(w, r, header) + if err != nil { + log.C(r.Context()).WithError(err).Error("Could not upgrade websocket connection") + return + } + if s.pingHandler != nil { + s.conn.SetPingHandler(s.pingHandler) + } + if s.onClientConnected != nil { + s.onClientConnected(s.conn) + } + go reader(s.conn) +} + +func 
reader(conn *websocket.Conn) { + for { + _, _, err := conn.ReadMessage() + if err != nil { + return + } + } +} + +type logWriter struct { + strings.Builder + bufferMutex sync.Mutex +} + +func (w *logWriter) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (w *logWriter) Fire(entry *logrus.Entry) error { + str, err := entry.String() + if err != nil { + return err + } + w.bufferMutex.Lock() + defer w.bufferMutex.Unlock() + _, err = w.WriteString(str) + return err +} + +func (w *logWriter) String() string { + w.bufferMutex.Lock() + defer w.bufferMutex.Unlock() + return w.Builder.String() +} diff --git a/pkg/sbproxy/options.go b/pkg/sbproxy/options.go index 2b951f55..32001c00 100644 --- a/pkg/sbproxy/options.go +++ b/pkg/sbproxy/options.go @@ -17,6 +17,7 @@ package sbproxy import ( + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications" "github.com/Peripli/service-broker-proxy/pkg/sbproxy/reconcile" "github.com/Peripli/service-broker-proxy/pkg/sm" "github.com/Peripli/service-manager/pkg/env" @@ -28,10 +29,11 @@ import ( // Settings type holds all config properties for the sbproxy type Settings struct { - Server *server.Settings `mapstructure:"server"` - Log *log.Settings `mapstructure:"log"` - Sm *sm.Settings `mapstructure:"sm"` - Reconcile *reconcile.Settings `mapstructure:"app"` + Server *server.Settings `mapstructure:"server"` + Log *log.Settings `mapstructure:"log"` + Sm *sm.Settings `mapstructure:"sm"` + Reconcile *reconcile.Settings `mapstructure:"app"` + Producer *notifications.ProducerSettings `mapstructure:"producer"` } // DefaultSettings returns default value for the proxy settings @@ -41,6 +43,7 @@ func DefaultSettings() *Settings { Log: log.DefaultSettings(), Sm: sm.DefaultSettings(), Reconcile: reconcile.DefaultSettings(), + Producer: notifications.DefaultProducerSettings(), } } @@ -63,7 +66,7 @@ func AddPFlags(set *pflag.FlagSet) { // Validate validates that the configuration contains all mandatory properties func (c *Settings) Validate() error { - validatable := []util.InputValidator{c.Server, c.Log, c.Sm, c.Reconcile} + validatable := []util.InputValidator{c.Server, c.Log, c.Sm, c.Reconcile, c.Producer} for _, item := range validatable { if err := item.Validate(); err != nil { diff --git a/pkg/sbproxy/options_test.go b/pkg/sbproxy/options_test.go index 86da8d3b..b8abced8 100644 --- a/pkg/sbproxy/options_test.go +++ b/pkg/sbproxy/options_test.go @@ -18,6 +18,8 @@ package sbproxy import ( "fmt" + "time" + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/reconcile" "github.com/Peripli/service-broker-proxy/pkg/sm" "github.com/Peripli/service-manager/pkg/env/envfakes" @@ -25,7 +27,6 @@ import ( "github.com/Peripli/service-manager/pkg/server" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" - "time" ) var _ = Describe("Config", func() { @@ -82,7 +83,6 @@ var _ = Describe("Config", func() { URL: "https://sm.com", OSBAPIPath: "/osb", RequestTimeout: 5 * time.Second, - ResyncPeriod: 5 * time.Minute, SkipSSLValidation: true, }, Reconcile: &reconcile.Settings{ @@ -156,7 +156,6 @@ var _ = Describe("Config", func() { URL: "https://sm.com", OSBAPIPath: "/osb", RequestTimeout: 5 * time.Second, - ResyncPeriod: 5 * time.Minute, SkipSSLValidation: true, }, Reconcile: &reconcile.Settings{ diff --git a/pkg/sbproxy/reconcile/reconcile_brokers.go b/pkg/sbproxy/reconcile/reconcile_brokers.go index 0c839f91..cd4c2556 100644 --- a/pkg/sbproxy/reconcile/reconcile_brokers.go +++ b/pkg/sbproxy/reconcile/reconcile_brokers.go @@ -17,6 +17,8 @@ package reconcile import ( + "context" + "github.com/Peripli/service-manager/pkg/log" "strings" @@ -33,21 +35,21 @@ type brokerKey struct { // processBrokers handles the reconciliation of the service brokers. // it gets the brokers from SM and the platform and runs the reconciliation -func (r *ReconciliationTask) processBrokers() { - logger := log.C(r.runContext) +func (r *resyncJob) processBrokers(ctx context.Context) { + logger := log.C(ctx) if r.platformClient.Broker() == nil { logger.Debug("Platform client cannot handle brokers. Broker reconciliation will be skipped.") return } // get all the registered brokers from the platform - brokersFromPlatform, err := r.getBrokersFromPlatform() + brokersFromPlatform, err := r.getBrokersFromPlatform(ctx) if err != nil { logger.WithError(err).Error("An error occurred while obtaining already registered brokers") return } - brokersFromSM, err := r.getBrokersFromSM() + brokersFromSM, err := r.getBrokersFromSM(ctx) if err != nil { logger.WithError(err).Error("An error occurred while obtaining brokers from Service Manager") return @@ -55,57 +57,52 @@ func (r *ReconciliationTask) processBrokers() { r.stats[smBrokersStats] = brokersFromSM // control logic - make sure current state matches desired state - r.reconcileBrokers(brokersFromPlatform, brokersFromSM) + r.reconcileBrokers(ctx, brokersFromPlatform, brokersFromSM) } // reconcileBrokers attempts to reconcile the current brokers state in the platform (existingBrokers) // to match the desired broker state coming from the Service Manager (payloadBrokers). 
-func (r *ReconciliationTask) reconcileBrokers(existingBrokers []platform.ServiceBroker, payloadBrokers []platform.ServiceBroker) { +func (r *resyncJob) reconcileBrokers(ctx context.Context, existingBrokers []platform.ServiceBroker, payloadBrokers []platform.ServiceBroker) { brokerKeyMap := indexBrokersByKey(existingBrokers) proxyBrokerIDMap := indexProxyBrokersByID(existingBrokers, r.proxyPath) for _, payloadBroker := range payloadBrokers { existingBroker := proxyBrokerIDMap[payloadBroker.GUID] delete(proxyBrokerIDMap, payloadBroker.GUID) - platformBroker, knownToPlatform := brokerKeyMap[getBrokerKey(&payloadBroker)] - // Broker is registered in platform, but not already known to this broker proxy - if knownToPlatform { - r.updateBrokerRegistration(platformBroker.GUID, &payloadBroker) - } else if existingBroker == nil { - r.createBrokerRegistration(&payloadBroker) + knownToSM := existingBroker != nil + // Broker is registered in platform, this is its first registration in SM but not already known to this broker proxy + if knownToPlatform && !knownToSM { + r.updateBrokerRegistration(ctx, platformBroker.GUID, &payloadBroker) + } else if !knownToSM { + r.createBrokerRegistration(ctx, &payloadBroker) } else { - r.fetchBrokerCatalog(existingBroker) + r.fetchBrokerCatalog(ctx, existingBroker) } } for _, existingBroker := range proxyBrokerIDMap { - r.deleteBrokerRegistration(existingBroker) + r.deleteBrokerRegistration(ctx, existingBroker) } } -func (r *ReconciliationTask) getBrokersFromPlatform() ([]platform.ServiceBroker, error) { - logger := log.C(r.runContext) - logger.Info("ReconciliationTask task getting brokers from platform...") - registeredBrokers, err := r.platformClient.Broker().GetBrokers(r.runContext) +func (r *resyncJob) getBrokersFromPlatform(ctx context.Context) ([]platform.ServiceBroker, error) { + logger := log.C(ctx) + logger.Info("resyncJob getting brokers from platform...") + registeredBrokers, err := r.platformClient.Broker().GetBrokers(ctx) if err != nil { return nil, errors.Wrap(err, "error getting brokers from platform") } + logger.Debugf("resyncJob SUCCESSFULLY retrieved %d brokers from platform", len(registeredBrokers)) - brokersFromPlatform := make([]platform.ServiceBroker, 0, len(registeredBrokers)) - for _, broker := range registeredBrokers { - logger.WithFields(logBroker(&broker)).Debug("ReconciliationTask task FOUND registered broker... 
") - brokersFromPlatform = append(brokersFromPlatform, broker) - } - logger.Infof("ReconciliationTask task SUCCESSFULLY retrieved %d brokers from platform", len(brokersFromPlatform)) - return brokersFromPlatform, nil + return registeredBrokers, nil } -func (r *ReconciliationTask) getBrokersFromSM() ([]platform.ServiceBroker, error) { - logger := log.C(r.runContext) - logger.Info("ReconciliationTask task getting brokers from Service Manager") +func (r *resyncJob) getBrokersFromSM(ctx context.Context) ([]platform.ServiceBroker, error) { + logger := log.C(ctx) + logger.Info("resyncJob getting brokers from Service Manager...") - proxyBrokers, err := r.smClient.GetBrokers(r.runContext) + proxyBrokers, err := r.smClient.GetBrokers(ctx) if err != nil { return nil, errors.Wrap(err, "error getting brokers from SM") } @@ -121,42 +118,42 @@ func (r *ReconciliationTask) getBrokersFromSM() ([]platform.ServiceBroker, error } brokersFromSM = append(brokersFromSM, brokerReg) } - logger.Infof("ReconciliationTask task SUCCESSFULLY retrieved %d brokers from Service Manager", len(brokersFromSM)) + logger.Infof("resyncJob SUCCESSFULLY retrieved %d brokers from Service Manager", len(brokersFromSM)) return brokersFromSM, nil } -func (r *ReconciliationTask) fetchBrokerCatalog(broker *platform.ServiceBroker) { +func (r *resyncJob) fetchBrokerCatalog(ctx context.Context, broker *platform.ServiceBroker) { if f, isFetcher := r.platformClient.(platform.CatalogFetcher); isFetcher { - logger := log.C(r.runContext) - logger.WithFields(logBroker(broker)).Infof("ReconciliationTask task refetching catalog for broker") - if err := f.Fetch(r.runContext, broker); err != nil { + logger := log.C(ctx) + logger.WithFields(logBroker(broker)).Infof("resyncJob refetching catalog for broker...") + if err := f.Fetch(ctx, broker); err != nil { logger.WithFields(logBroker(broker)).WithError(err).Error("Error during fetching catalog...") } else { - logger.WithFields(logBroker(broker)).Info("ReconciliationTask task SUCCESSFULLY refetched catalog for broker") + logger.WithFields(logBroker(broker)).Info("resyncJob SUCCESSFULLY refetched catalog for broker") } } } -func (r *ReconciliationTask) createBrokerRegistration(broker *platform.ServiceBroker) { - logger := log.C(r.runContext) - logger.WithFields(logBroker(broker)).Info("ReconciliationTask task attempting to create proxy for broker in platform...") +func (r *resyncJob) createBrokerRegistration(ctx context.Context, broker *platform.ServiceBroker) { + logger := log.C(ctx) + logger.WithFields(logBroker(broker)).Info("resyncJob creating proxy for broker in platform...") createRequest := &platform.CreateServiceBrokerRequest{ Name: r.options.BrokerPrefix + broker.Name, BrokerURL: r.proxyPath + "/" + broker.GUID, } - if b, err := r.platformClient.Broker().CreateBroker(r.runContext, createRequest); err != nil { + if b, err := r.platformClient.Broker().CreateBroker(ctx, createRequest); err != nil { logger.WithFields(logBroker(broker)).WithError(err).Error("Error during broker creation") } else { - logger.WithFields(logBroker(b)).Infof("ReconciliationTask task SUCCESSFULLY created proxy for broker at platform under name [%s] accessible at [%s]", createRequest.Name, createRequest.BrokerURL) + logger.WithFields(logBroker(b)).Infof("resyncJob SUCCESSFULLY created proxy for broker at platform under name [%s] accessible at [%s]", createRequest.Name, createRequest.BrokerURL) } } -func (r *ReconciliationTask) updateBrokerRegistration(brokerGUID string, broker *platform.ServiceBroker) { - logger := 
log.C(r.runContext) - logger.WithFields(logBroker(broker)).Info("ReconciliationTask task attempting to update broker registration in platform...") +func (r *resyncJob) updateBrokerRegistration(ctx context.Context, brokerGUID string, broker *platform.ServiceBroker) { + logger := log.C(ctx) + logger.WithFields(logBroker(broker)).Info("resyncJob updating broker registration in platform...") updateRequest := &platform.UpdateServiceBrokerRequest{ GUID: brokerGUID, @@ -164,26 +161,26 @@ func (r *ReconciliationTask) updateBrokerRegistration(brokerGUID string, broker BrokerURL: r.proxyPath + "/" + broker.GUID, } - if b, err := r.platformClient.Broker().UpdateBroker(r.runContext, updateRequest); err != nil { + if b, err := r.platformClient.Broker().UpdateBroker(ctx, updateRequest); err != nil { logger.WithFields(logBroker(broker)).WithError(err).Error("Error during broker update") } else { - logger.WithFields(logBroker(b)).Infof("ReconciliationTask task SUCCESSFULLY updated broker registration at platform under name [%s] accessible at [%s]", updateRequest.Name, updateRequest.BrokerURL) + logger.WithFields(logBroker(b)).Infof("resyncJob SUCCESSFULLY updated broker registration at platform under name [%s] accessible at [%s]", updateRequest.Name, updateRequest.BrokerURL) } } -func (r *ReconciliationTask) deleteBrokerRegistration(broker *platform.ServiceBroker) { - logger := log.C(r.runContext) - logger.WithFields(logBroker(broker)).Info("ReconciliationTask task attempting to delete broker from platform...") +func (r *resyncJob) deleteBrokerRegistration(ctx context.Context, broker *platform.ServiceBroker) { + logger := log.C(ctx) + logger.WithFields(logBroker(broker)).Info("resyncJob deleting broker from platform...") deleteRequest := &platform.DeleteServiceBrokerRequest{ GUID: broker.GUID, Name: broker.Name, } - if err := r.platformClient.Broker().DeleteBroker(r.runContext, deleteRequest); err != nil { + if err := r.platformClient.Broker().DeleteBroker(ctx, deleteRequest); err != nil { logger.WithFields(logBroker(broker)).WithError(err).Error("Error during broker deletion") } else { - logger.WithFields(logBroker(broker)).Infof("ReconciliationTask task SUCCESSFULLY deleted proxy broker from platform with name [%s]", deleteRequest.Name) + logger.WithFields(logBroker(broker)).Infof("resyncJob SUCCESSFULLY deleted proxy broker from platform with name [%s]", deleteRequest.Name) } } diff --git a/pkg/sbproxy/reconcile/reconcile_brokers_test.go b/pkg/sbproxy/reconcile/reconcile_brokers_test.go index ca6f7cef..f58794f5 100644 --- a/pkg/sbproxy/reconcile/reconcile_brokers_test.go +++ b/pkg/sbproxy/reconcile/reconcile_brokers_test.go @@ -19,9 +19,10 @@ package reconcile import ( "context" "fmt" - "sync" "time" + "github.com/patrickmn/go-cache" + "github.com/Peripli/service-broker-proxy/pkg/platform" "github.com/Peripli/service-broker-proxy/pkg/platform/platformfakes" "github.com/Peripli/service-broker-proxy/pkg/sm" @@ -30,12 +31,10 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo/extensions/table" . 
"github.com/onsi/gomega" - "github.com/patrickmn/go-cache" ) var _ = Describe("Reconcile brokers", func() { const fakeAppHost = "https://smproxy.com" - const brokerPrefix = "sm-proxy-" var ( fakeSMClient *smfakes.FakeClient @@ -43,9 +42,7 @@ var _ = Describe("Reconcile brokers", func() { fakePlatformCatalogFetcher *platformfakes.FakeCatalogFetcher fakePlatformBrokerClient *platformfakes.FakeBrokerClient - waitGroup *sync.WaitGroup - - reconciliationTask *ReconciliationTask + reconciler *Reconciler smbroker1 sm.Broker smbroker2 sm.Broker @@ -92,7 +89,6 @@ var _ = Describe("Reconcile brokers", func() { fakePlatformClient.VisibilityReturns(nil) visibilityCache := cache.New(5*time.Minute, 10*time.Minute) - waitGroup = &sync.WaitGroup{} platformClient := struct { *platformfakes.FakeCatalogFetcher @@ -102,9 +98,9 @@ var _ = Describe("Reconcile brokers", func() { FakeClient: fakePlatformClient, } - reconciliationTask = NewTask( - context.TODO(), DefaultSettings(), waitGroup, platformClient, fakeSMClient, - fakeAppHost, visibilityCache) + reconciler = &Reconciler{ + Resyncer: NewResyncer(DefaultSettings(), platformClient, fakeSMClient, fakeAppHost, visibilityCache), + } smbroker1 = sm.Broker{ ID: "smBrokerID1", @@ -174,13 +170,13 @@ var _ = Describe("Reconcile brokers", func() { platformbroker1 = platform.ServiceBroker{ GUID: "platformBrokerID1", - Name: brokerPrefix + "smBroker1", + Name: DefaultProxyBrokerPrefix + "smBroker1", BrokerURL: fakeAppHost + "/" + smbroker1.ID, } platformbroker2 = platform.ServiceBroker{ GUID: "platformBrokerID2", - Name: brokerPrefix + "smBroker2", + Name: DefaultProxyBrokerPrefix + "smBroker2", BrokerURL: fakeAppHost + "/" + smbroker2.ID, } @@ -200,12 +196,11 @@ var _ = Describe("Reconcile brokers", func() { ID: "smBrokerID3", Name: platformbrokerNonProxy.Name, BrokerURL: platformbrokerNonProxy.BrokerURL, - // ServiceOfferings: []types.ServiceOffering{}, } platformBrokerProxy = platform.ServiceBroker{ GUID: platformbrokerNonProxy.GUID, - Name: "sm-proxy-" + smbroker3.Name, + Name: DefaultProxyBrokerPrefix + smbroker3.Name, BrokerURL: fakeAppHost + "/" + smbroker3.ID, } }) @@ -347,7 +342,7 @@ var _ = Describe("Reconcile brokers", func() { }, }), - Entry("When broker is in SM and is also in platform it should be catalog refetched and access enabled", testCase{ + Entry("When broker is in SM and is also in platform it should be catalog refetched", testCase{ stubs: func() { stubPlatformOpsToSucceed() }, @@ -417,7 +412,7 @@ var _ = Describe("Reconcile brokers", func() { }, }), - Entry("When broker that is registered in SM is also registered in the platform, but not as a proxy, it should be proxified/updated", testCase{ + Entry("When broker is registered in the platform and SM, but not yet proxified, it should be updated", testCase{ stubs: func() { stubPlatformOpsToSucceed() stubPlatformUpdateBroker() @@ -446,7 +441,7 @@ var _ = Describe("Reconcile brokers", func() { }), } - DescribeTable("Run", func(t testCase) { + DescribeTable("resync", func(t testCase) { smBrokers, err1 := t.smBrokers() platformBrokers, err2 := t.platformBrokers() @@ -454,7 +449,7 @@ var _ = Describe("Reconcile brokers", func() { fakePlatformBrokerClient.GetBrokersReturns(platformBrokers, err2) t.stubs() - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) if err1 != nil { Expect(len(fakePlatformBrokerClient.Invocations())).To(Equal(1)) diff --git a/pkg/sbproxy/reconcile/reconcile_settings.go b/pkg/sbproxy/reconcile/reconcile_settings.go index d84d6f5b..5dfe5c0e 100644 --- 
a/pkg/sbproxy/reconcile/reconcile_settings.go +++ b/pkg/sbproxy/reconcile/reconcile_settings.go @@ -23,7 +23,8 @@ import ( "github.com/pkg/errors" ) -const defaultProxyBrokerPrefix = "sm-proxy-" +// DefaultProxyBrokerPrefix prefix for brokers registered by the proxy +const DefaultProxyBrokerPrefix = "sm-" // Settings type represents the sbproxy settings type Settings struct { @@ -43,7 +44,7 @@ func DefaultSettings() *Settings { URL: "", Username: "", Password: "", - BrokerPrefix: defaultProxyBrokerPrefix, + BrokerPrefix: DefaultProxyBrokerPrefix, VisibilityCache: true, CacheExpiration: 2 * time.Hour, } diff --git a/pkg/sbproxy/reconcile/reconcile_task.go b/pkg/sbproxy/reconcile/reconcile_task.go deleted file mode 100644 index 0949ee89..00000000 --- a/pkg/sbproxy/reconcile/reconcile_task.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2018 The Service Manager Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package reconcile - -import ( - "context" - "sync" - "sync/atomic" - - "github.com/Peripli/service-broker-proxy/pkg/platform" - "github.com/Peripli/service-broker-proxy/pkg/sm" - "github.com/Peripli/service-manager/pkg/log" - "github.com/gofrs/uuid" - cache "github.com/patrickmn/go-cache" - "github.com/pkg/errors" -) - -const running int32 = 1 -const notRunning int32 = 0 - -const smBrokersStats = "sm_brokers" - -// ReconciliationTask type represents a registration task that takes care of propagating broker and visibility -// creations and deletions to the platform. It reconciles the state of the proxy brokers and visibilities -// in the platform to match the desired state provided by the Service Manager. -// TODO if the reg credentials are changed (the ones under cf.reg) we need to update the already registered brokers -type ReconciliationTask struct { - options *Settings - group *sync.WaitGroup - platformClient platform.Client - smClient sm.Client - proxyPath string - cache *cache.Cache - state *int32 - - globalContext context.Context - runContext context.Context - - stats map[string]interface{} -} - -// NewTask builds a new ReconciliationTask -func NewTask(ctx context.Context, - options *Settings, - group *sync.WaitGroup, - platformClient platform.Client, - smClient sm.Client, - proxyPath string, - c *cache.Cache) *ReconciliationTask { - return &ReconciliationTask{ - options: options, - group: group, - platformClient: platformClient, - smClient: smClient, - proxyPath: proxyPath, - globalContext: ctx, - cache: c, - runContext: nil, - state: new(int32), - } -} - -// Run executes the registration task that is responsible for reconciling the state of the proxy -// brokers and visibilities at the platform with the brokers provided by the Service Manager -func (r *ReconciliationTask) Run() { - isAlreadyRunnning := !atomic.CompareAndSwapInt32(r.state, notRunning, running) - if isAlreadyRunnning { - log.C(r.globalContext).Warning("Reconciliation task cannot start. 
Another reconciliation task is already running") - return - } - defer r.end() - - r.stats = make(map[string]interface{}) - - newRunContext, taskID, err := r.generateRunContext() - if err != nil { - log.C(r.globalContext).WithError(err).Error("reconsilation task will not be scheduled") - return - } - r.runContext = newRunContext - log.C(r.globalContext).Infof("STARTING scheduled reconciliation task %s...", taskID) - - r.group.Add(1) - defer r.group.Done() - r.run() - log.C(r.globalContext).Infof("FINISHED scheduled reconciliation task %s...", taskID) -} - -func (r *ReconciliationTask) run() { - r.processBrokers() - r.processVisibilities() -} - -func (r *ReconciliationTask) end() { - atomic.StoreInt32(r.state, notRunning) -} - -func (r *ReconciliationTask) generateRunContext() (context.Context, string, error) { - correlationID, err := uuid.NewV4() - if err != nil { - return nil, "", errors.Wrap(err, "could not generate correlationID") - } - entry := log.C(r.globalContext).WithField(log.FieldCorrelationID, correlationID.String()) - return log.ContextWithLogger(r.globalContext, entry), correlationID.String(), nil -} - -func (r *ReconciliationTask) stat(key string) interface{} { - result, found := r.stats[key] - if !found { - log.C(r.runContext).Infof("No %s found in cache", key) - return nil - } - - log.C(r.runContext).Infof("Picked up %s from cache", key) - - return result -} diff --git a/pkg/sbproxy/reconcile/reconcile_task_test.go b/pkg/sbproxy/reconcile/reconcile_task_test.go deleted file mode 100644 index cb593df4..00000000 --- a/pkg/sbproxy/reconcile/reconcile_task_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* - * Copyright 2018 The Service Manager Authors - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package reconcile - -import ( - "context" - "sync" - "time" - - "github.com/Peripli/service-broker-proxy/pkg/platform" - "github.com/pkg/errors" - - "github.com/Peripli/service-broker-proxy/pkg/platform/platformfakes" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -var _ = Describe("Reconcile", func() { - - Describe("Run", func() { - var ( - stopRun chan bool - isRunning chan bool - wg sync.WaitGroup - platformClient platformfakes.FakeClient - task *ReconciliationTask - ) - - startRunAsync := func() { - wg.Add(1) - go func() { - defer wg.Done() - task.Run() - }() - } - - waitForChannel := func(c chan bool) error { - select { - case <-c: - return nil - case <-time.After(5 * time.Second): - return errors.New("channel timed out") - } - } - - BeforeEach(func() { - stopRun = make(chan bool, 1) - isRunning = make(chan bool, 1) - platformClient.BrokerStub = func() platform.BrokerClient { - isRunning <- true - Expect(waitForChannel(stopRun)).ToNot(HaveOccurred()) - return nil - } - task = NewTask(context.TODO(), nil, &sync.WaitGroup{}, &platformClient, nil, "", nil) - }) - - AfterEach(func() { - wg.Wait() - }) - - Context("when another task is not yet finished", func() { - - It("should not be started", func() { - startRunAsync() - Expect(waitForChannel(isRunning)).ToNot(HaveOccurred()) - task.Run() - stopRun <- true - }) - }) - - Context("when the first task has finished", func() { - - It("should finish the second one", func() { - startRunAsync() - stopRun <- true - Expect(waitForChannel(isRunning)).ToNot(HaveOccurred()) - wg.Wait() - - startRunAsync() - stopRun <- true - }) - }) - - }) -}) diff --git a/pkg/sbproxy/reconcile/reconcile_visibilities.go b/pkg/sbproxy/reconcile/reconcile_visibilities.go index 1d8cc62a..45ae8778 100644 --- a/pkg/sbproxy/reconcile/reconcile_visibilities.go +++ b/pkg/sbproxy/reconcile/reconcile_visibilities.go @@ -18,13 +18,10 @@ package reconcile import ( "context" - "encoding/json" "strings" "sync" "time" - "github.com/sirupsen/logrus" - "github.com/Peripli/service-broker-proxy/pkg/platform" "github.com/Peripli/service-manager/pkg/log" "github.com/Peripli/service-manager/pkg/types" @@ -35,19 +32,31 @@ const ( smPlansCacheKey = "sm_plans" ) +func (r *resyncJob) stat(ctx context.Context, key string) interface{} { + result, found := r.stats[key] + if !found { + log.C(ctx).Infof("No %s found in cache", key) + return nil + } + + log.C(ctx).Infof("Picked up %s from cache", key) + + return result +} + // processVisibilities handles the reconsilation of the service visibilities. // It gets the service visibilities from Service Manager and the platform and runs the reconciliation // nolint: gocyclo -func (r *ReconciliationTask) processVisibilities() { - logger := log.C(r.runContext) +func (r *resyncJob) processVisibilities(ctx context.Context) { + logger := log.C(ctx) if r.platformClient.Visibility() == nil { logger.Debug("Platform client cannot handle visibilities. 
Visibility reconciliation will be skipped.") return } - brokerFromStats := r.stat(smBrokersStats) + brokerFromStats := r.stat(ctx, smBrokersStats) smBrokers, ok := brokerFromStats.([]platform.ServiceBroker) - logger.Infof("ReconciliationTask SUCCESSFULLY retrieved %d Service Manager brokers from cache", len(smBrokers)) + logger.Infof("resyncJob SUCCESSFULLY retrieved %d Service Manager brokers from cache", len(smBrokers)) if !ok { logger.Errorf("Expected %T to be %T", brokerFromStats, smBrokers) @@ -58,43 +67,43 @@ func (r *ReconciliationTask) processVisibilities() { return } - smOfferings, err := r.getSMServiceOfferingsByBrokers(smBrokers) + smOfferings, err := r.getSMServiceOfferingsByBrokers(ctx, smBrokers) if err != nil { logger.WithError(err).Error("An error occurred while obtaining service offerings from Service Manager") return } - smPlans, err := r.getSMPlansByBrokersAndOfferings(smOfferings) + smPlans, err := r.getSMPlansByBrokersAndOfferings(ctx, smOfferings) if err != nil { logger.WithError(err).Error("An error occurred while obtaining plans from Service Manager") return } plansMap := smPlansToMap(smPlans) - smVisibilities, err := r.getSMVisibilities(plansMap, smBrokers) + smVisibilities, err := r.getSMVisibilities(ctx, plansMap, smBrokers) if err != nil { logger.WithError(err).Error("An error occurred while obtaining Service Manager visibilities") return } - var platformVisibilities []*platform.ServiceVisibilityEntity + var platformVisibilities []*platform.Visibility visibilityCacheUsed := false - if r.options.VisibilityCache && r.areSMPlansSame(smPlans) { + if r.options.VisibilityCache && r.areSMPlansSame(ctx, smPlans) { logger.Infof("Actual SM plans and cached SM plans are same. Attempting to pick up cached platform visibilities...") - platformVisibilities = r.getPlatformVisibilitiesFromCache() + platformVisibilities = r.getPlatformVisibilitiesFromCache(ctx) visibilityCacheUsed = true } if platformVisibilities == nil { logger.Infof("Actual SM plans and cached SM plans are different. 
Invalidating cached platform visibilities and calling platform API to fetch actual platform visibilities...") - platformVisibilities, err = r.getPlatformVisibilitiesByBrokersFromPlatform(smBrokers) + platformVisibilities, err = r.getPlatformVisibilitiesByBrokersFromPlatform(ctx, smBrokers) if err != nil { logger.WithError(err).Error("An error occurred while loading visibilities from platform") return } } - errorOccured := r.reconcileServiceVisibilities(platformVisibilities, smVisibilities) + errorOccured := r.reconcileServiceVisibilities(ctx, platformVisibilities, smVisibilities) if errorOccured { logger.Error("Could not reconcile visibilities") } @@ -104,14 +113,14 @@ func (r *ReconciliationTask) processVisibilities() { r.cache.Delete(platformVisibilityCacheKey) r.cache.Delete(smPlansCacheKey) } else { - r.updateVisibilityCache(visibilityCacheUsed, plansMap, smVisibilities) + r.updateVisibilityCache(ctx, visibilityCacheUsed, plansMap, smVisibilities) } } } // updateVisibilityCache -func (r *ReconciliationTask) updateVisibilityCache(visibilityCacheUsed bool, plansMap map[brokerPlanKey]*types.ServicePlan, visibilities []*platform.ServiceVisibilityEntity) { - log.C(r.globalContext).Infof("Updating cache with the %d newly fetched SM plans as cached-SM-plans and expiration duration %s", len(plansMap), r.options.CacheExpiration) +func (r *resyncJob) updateVisibilityCache(ctx context.Context, visibilityCacheUsed bool, plansMap map[brokerPlanKey]*types.ServicePlan, visibilities []*platform.Visibility) { + log.C(ctx).Infof("Updating cache with the %d newly fetched SM plans as cached-SM-plans and expiration duration %s", len(plansMap), r.options.CacheExpiration) r.cache.Set(smPlansCacheKey, plansMap, r.options.CacheExpiration) visibilitiesExpiration := r.options.CacheExpiration if visibilityCacheUsed { @@ -121,20 +130,20 @@ func (r *ReconciliationTask) updateVisibilityCache(visibilityCacheUsed bool, pla } } - log.C(r.globalContext).Infof("Updating cache with the %d newly fetched SM visibilities as cached-platform-visibilities and expiration duration %s", len(visibilities), visibilitiesExpiration) + log.C(ctx).Infof("Updating cache with the %d newly fetched SM visibilities as cached-platform-visibilities and expiration duration %s", len(visibilities), visibilitiesExpiration) r.cache.Set(platformVisibilityCacheKey, visibilities, visibilitiesExpiration) } // areSMPlansSame checks if there are new or deleted plans in SM. // Returns true if there are no new or deleted plans, false otherwise -func (r *ReconciliationTask) areSMPlansSame(plans map[string][]*types.ServicePlan) bool { +func (r *resyncJob) areSMPlansSame(ctx context.Context, plans map[string][]*types.ServicePlan) bool { cachedPlans, isPresent := r.cache.Get(smPlansCacheKey) if !isPresent { return false } cachedPlansMap, ok := cachedPlans.(map[brokerPlanKey]*types.ServicePlan) if !ok { - log.C(r.runContext).Error("Service Manager plans cache is in invalid state! Clearing...") + log.C(ctx).Error("Service Manager plans cache is in invalid state! 
Clearing...") r.cache.Delete(smPlansCacheKey) return false } @@ -153,35 +162,35 @@ func (r *ReconciliationTask) areSMPlansSame(plans map[string][]*types.ServicePla return true } -func (r *ReconciliationTask) getPlatformVisibilitiesFromCache() []*platform.ServiceVisibilityEntity { +func (r *resyncJob) getPlatformVisibilitiesFromCache(ctx context.Context) []*platform.Visibility { platformVisibilities, found := r.cache.Get(platformVisibilityCacheKey) if !found { return nil } - if result, ok := platformVisibilities.([]*platform.ServiceVisibilityEntity); ok { - log.C(r.runContext).Infof("ReconciliationTask fetched %d platform visibilities from cache", len(result)) + if result, ok := platformVisibilities.([]*platform.Visibility); ok { + log.C(ctx).Infof("resyncJob fetched %d platform visibilities from cache", len(result)) return result } - log.C(r.runContext).Error("Platform visibilities cache is in invalid state! Clearing...") + log.C(ctx).Error("Platform visibilities cache is in invalid state! Clearing...") r.cache.Delete(platformVisibilityCacheKey) return nil } -func (r *ReconciliationTask) getPlatformVisibilitiesByBrokersFromPlatform(brokers []platform.ServiceBroker) ([]*platform.ServiceVisibilityEntity, error) { - logger := log.C(r.runContext) - logger.Debug("ReconciliationTask getting visibilities from platform") +func (r *resyncJob) getPlatformVisibilitiesByBrokersFromPlatform(ctx context.Context, brokers []platform.ServiceBroker) ([]*platform.Visibility, error) { + logger := log.C(ctx) + logger.Debug("resyncJob getting visibilities from platform") names := r.brokerNames(brokers) - visibilities, err := r.platformClient.Visibility().GetVisibilitiesByBrokers(r.runContext, names) + visibilities, err := r.platformClient.Visibility().GetVisibilitiesByBrokers(ctx, names) if err != nil { return nil, err } - logger.Debugf("ReconciliationTask SUCCESSFULLY retrieved %d visibilities from platform", len(visibilities)) + logger.Debugf("resyncJob SUCCESSFULLY retrieved %d visibilities from platform", len(visibilities)) return visibilities, nil } -func (r *ReconciliationTask) brokerNames(brokers []platform.ServiceBroker) []string { +func (r *resyncJob) brokerNames(brokers []platform.ServiceBroker) []string { names := make([]string, 0, len(brokers)) for _, broker := range brokers { names = append(names, r.options.BrokerPrefix+broker.Name) @@ -189,37 +198,37 @@ func (r *ReconciliationTask) brokerNames(brokers []platform.ServiceBroker) []str return names } -func (r *ReconciliationTask) getSMPlansByBrokersAndOfferings(offerings map[string][]*types.ServiceOffering) (map[string][]*types.ServicePlan, error) { +func (r *resyncJob) getSMPlansByBrokersAndOfferings(ctx context.Context, offerings map[string][]*types.ServiceOffering) (map[string][]*types.ServicePlan, error) { result := make(map[string][]*types.ServicePlan) count := 0 for brokerID, sos := range offerings { if len(sos) == 0 { continue } - brokerPlans, err := r.smClient.GetPlansByServiceOfferings(r.runContext, sos) + brokerPlans, err := r.smClient.GetPlansByServiceOfferings(ctx, sos) if err != nil { return nil, err } result[brokerID] = brokerPlans count += len(brokerPlans) } - log.C(r.runContext).Infof("ReconciliationTask SUCCESSFULLY retrieved %d plans from Service Manager", count) + log.C(ctx).Infof("resyncJob SUCCESSFULLY retrieved %d plans from Service Manager", count) return result, nil } -func (r *ReconciliationTask) getSMServiceOfferingsByBrokers(brokers []platform.ServiceBroker) (map[string][]*types.ServiceOffering, error) { +func (r 
*resyncJob) getSMServiceOfferingsByBrokers(ctx context.Context, brokers []platform.ServiceBroker) (map[string][]*types.ServiceOffering, error) { result := make(map[string][]*types.ServiceOffering) brokerIDs := make([]string, 0, len(brokers)) for _, broker := range brokers { brokerIDs = append(brokerIDs, broker.GUID) } - offerings, err := r.smClient.GetServiceOfferingsByBrokerIDs(r.runContext, brokerIDs) + offerings, err := r.smClient.GetServiceOfferingsByBrokerIDs(ctx, brokerIDs) if err != nil { return nil, err } - log.C(r.runContext).Infof("ReconciliationTask SUCCESSFULLY retrieved %d services from Service Manager", len(offerings)) + log.C(ctx).Infof("resyncJob SUCCESSFULLY retrieved %d services from Service Manager", len(offerings)) for _, offering := range offerings { if result[offering.BrokerID] == nil { @@ -231,17 +240,17 @@ func (r *ReconciliationTask) getSMServiceOfferingsByBrokers(brokers []platform.S return result, nil } -func (r *ReconciliationTask) getSMVisibilities(smPlansMap map[brokerPlanKey]*types.ServicePlan, smBrokers []platform.ServiceBroker) ([]*platform.ServiceVisibilityEntity, error) { - logger := log.C(r.runContext) - logger.Info("ReconciliationTask getting visibilities from Service Manager...") +func (r *resyncJob) getSMVisibilities(ctx context.Context, smPlansMap map[brokerPlanKey]*types.ServicePlan, smBrokers []platform.ServiceBroker) ([]*platform.Visibility, error) { + logger := log.C(ctx) + logger.Info("resyncJob getting visibilities from Service Manager...") - visibilities, err := r.smClient.GetVisibilities(r.runContext) + visibilities, err := r.smClient.GetVisibilities(ctx) if err != nil { return nil, err } - logger.Infof("ReconciliationTask SUCCESSFULLY retrieved %d visibilities from Service Manager", len(visibilities)) + logger.Infof("resyncJob SUCCESSFULLY retrieved %d visibilities from Service Manager", len(visibilities)) - result := make([]*platform.ServiceVisibilityEntity, 0) + result := make([]*platform.Visibility, 0) for _, visibility := range visibilities { for _, broker := range smBrokers { @@ -257,44 +266,44 @@ func (r *ReconciliationTask) getSMVisibilities(smPlansMap map[brokerPlanKey]*typ result = append(result, converted...) 
} } - logger.Infof("ReconciliationTask SUCCESSFULLY converted %d Service Manager visibilities to %d platform visibilities", len(visibilities), len(result)) + logger.Infof("resyncJob SUCCESSFULLY converted %d Service Manager visibilities to %d platform visibilities", len(visibilities), len(result)) return result, nil } -func (r *ReconciliationTask) convertSMVisibility(visibility *types.Visibility, smPlan *types.ServicePlan, brokerNAME string) []*platform.ServiceVisibilityEntity { +func (r *resyncJob) convertSMVisibility(visibility *types.Visibility, smPlan *types.ServicePlan, brokerName string) []*platform.Visibility { scopeLabelKey := r.platformClient.Visibility().VisibilityScopeLabelKey() if visibility.PlatformID == "" || scopeLabelKey == "" { - return []*platform.ServiceVisibilityEntity{ + return []*platform.Visibility{ { Public: true, CatalogPlanID: smPlan.CatalogID, - PlatformBrokerName: r.options.BrokerPrefix + brokerNAME, + PlatformBrokerName: r.options.BrokerPrefix + brokerName, Labels: map[string]string{}, }, } } scopes := visibility.Labels[scopeLabelKey] - result := make([]*platform.ServiceVisibilityEntity, 0, len(scopes)) + result := make([]*platform.Visibility, 0, len(scopes)) for _, scope := range scopes { - result = append(result, &platform.ServiceVisibilityEntity{ + result = append(result, &platform.Visibility{ Public: false, CatalogPlanID: smPlan.CatalogID, - PlatformBrokerName: r.options.BrokerPrefix + brokerNAME, + PlatformBrokerName: r.options.BrokerPrefix + brokerName, Labels: map[string]string{scopeLabelKey: scope}, }) } return result } -func (r *ReconciliationTask) reconcileServiceVisibilities(platformVis, smVis []*platform.ServiceVisibilityEntity) bool { - logger := log.C(r.runContext) - logger.Info("ReconciliationTask reconciling platform and Service Manager visibilities...") +func (r *resyncJob) reconcileServiceVisibilities(ctx context.Context, platformVis, smVis []*platform.Visibility) bool { + logger := log.C(ctx) + logger.Info("resyncJob reconciling platform and Service Manager visibilities...") platformMap := r.convertVisListToMap(platformVis) - visibilitiesToCreate := make([]*platform.ServiceVisibilityEntity, 0) + visibilitiesToCreate := make([]*platform.Visibility, 0) for _, visibility := range smVis { key := r.getVisibilityKey(visibility) existingVis := platformMap[key] @@ -304,15 +313,15 @@ func (r *ReconciliationTask) reconcileServiceVisibilities(platformVis, smVis []* } } - logger.Infof("ReconciliationTask %d visibilities will be removed from the platform", len(platformMap)) - if errorOccured := r.deleteVisibilities(platformMap); errorOccured != nil { - logger.WithError(errorOccured).Error("ReconciliationTask - could not remove visibilities from platform") + logger.Infof("resyncJob %d visibilities will be removed from the platform", len(platformMap)) + if errorOccured := r.deleteVisibilities(ctx, platformMap); errorOccured != nil { + logger.WithError(errorOccured).Error("resyncJob - could not remove visibilities from platform") return true } - logger.Infof("ReconciliationTask %d visibilities will be created in the platform", len(visibilitiesToCreate)) - if errorOccured := r.createVisibilities(visibilitiesToCreate); errorOccured != nil { - logger.WithError(errorOccured).Error("ReconciliationTask - could not create visibilities in platform") + logger.Infof("resyncJob %d visibilities will be created in the platform", len(visibilitiesToCreate)) + if errorOccured := r.createVisibilities(ctx, visibilitiesToCreate); errorOccured != nil { + 
logger.WithError(errorOccured).Error("resyncJob - could not create visibilities in platform") return true } @@ -327,8 +336,8 @@ type visibilityProcessingState struct { ErrorOccured error } -func (r *ReconciliationTask) newVisibilityProcessingState() *visibilityProcessingState { - visibilitiesContext, cancel := context.WithCancel(r.runContext) +func (r *resyncJob) newVisibilityProcessingState(ctx context.Context) *visibilityProcessingState { + visibilitiesContext, cancel := context.WithCancel(ctx) return &visibilityProcessingState{ Ctx: visibilitiesContext, StopProcessing: cancel, @@ -336,8 +345,8 @@ func (r *ReconciliationTask) newVisibilityProcessingState() *visibilityProcessin } // deleteVisibilities deletes visibilities from platform. Returns true if error has occurred -func (r *ReconciliationTask) deleteVisibilities(visibilities map[string]*platform.ServiceVisibilityEntity) error { - state := r.newVisibilityProcessingState() +func (r *resyncJob) deleteVisibilities(ctx context.Context, visibilities map[string]*platform.Visibility) error { + state := r.newVisibilityProcessingState(ctx) defer state.StopProcessing() for _, visibility := range visibilities { @@ -347,8 +356,8 @@ func (r *ReconciliationTask) deleteVisibilities(visibilities map[string]*platfor } // createVisibilities creates visibilities from platform. Returns true if error has occurred -func (r *ReconciliationTask) createVisibilities(visibilities []*platform.ServiceVisibilityEntity) error { - state := r.newVisibilityProcessingState() +func (r *resyncJob) createVisibilities(ctx context.Context, visibilities []*platform.Visibility) error { + state := r.newVisibilityProcessingState(ctx) defer state.StopProcessing() for _, visibility := range visibilities { @@ -357,7 +366,7 @@ func (r *ReconciliationTask) createVisibilities(visibilities []*platform.Service return await(state) } -func execAsync(state *visibilityProcessingState, visibility *platform.ServiceVisibilityEntity, f func(context.Context, *platform.ServiceVisibilityEntity) error) { +func execAsync(state *visibilityProcessingState, visibility *platform.Visibility, f func(context.Context, *platform.Visibility) error) { state.WaitGroup.Add(1) go func() { @@ -380,7 +389,7 @@ func await(state *visibilityProcessingState) error { } // getVisibilityKey maps a generic visibility to a specific string. 
The string contains catalogID and scope for non-public plans -func (r *ReconciliationTask) getVisibilityKey(visibility *platform.ServiceVisibilityEntity) string { +func (r *resyncJob) getVisibilityKey(visibility *platform.Visibility) string { scopeLabelKey := r.platformClient.Visibility().VisibilityScopeLabelKey() const idSeparator = "|" @@ -390,50 +399,40 @@ func (r *ReconciliationTask) getVisibilityKey(visibility *platform.ServiceVisibi return strings.Join([]string{"!public", visibility.Labels[scopeLabelKey], visibility.PlatformBrokerName, visibility.CatalogPlanID}, idSeparator) } -func (r *ReconciliationTask) createVisibility(ctx context.Context, visibility *platform.ServiceVisibilityEntity) error { - logger := log.C(r.runContext) - logger.Infof("Reconciliation task attempting to create visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) +func (r *resyncJob) createVisibility(ctx context.Context, visibility *platform.Visibility) error { + logger := log.C(ctx) + logger.Infof("resyncJob creating visibility for catalog plan %s with labels %v...", visibility.CatalogPlanID, visibility.Labels) - json, err := marshalVisibilityLabels(logger, visibility.Labels) - if err != nil { - return err - } - if err = r.platformClient.Visibility().EnableAccessForPlan(ctx, json, visibility.CatalogPlanID, visibility.PlatformBrokerName); err != nil { - logger.WithError(err).Errorf("Could not enable access for plan %s", visibility.CatalogPlanID) + if err := r.platformClient.Visibility().EnableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: visibility.PlatformBrokerName, + CatalogPlanID: visibility.CatalogPlanID, + Labels: mapToLabels(visibility.Labels), + }); err != nil { return err } - logger.Infof("Reconciliation task SUCCESSFULLY created visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) + logger.Infof("resyncJob SUCCESSFULLY created visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) return nil } -func (r *ReconciliationTask) deleteVisibility(ctx context.Context, visibility *platform.ServiceVisibilityEntity) error { - logger := log.C(r.runContext) - logger.Infof("Reconciliation task attempting to delete visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) +func (r *resyncJob) deleteVisibility(ctx context.Context, visibility *platform.Visibility) error { + logger := log.C(ctx) + logger.Infof("resyncJob deleting visibility for catalog plan %s with labels %v...", visibility.CatalogPlanID, visibility.Labels) - json, err := marshalVisibilityLabels(logger, visibility.Labels) - if err != nil { - return err - } - if err = r.platformClient.Visibility().DisableAccessForPlan(ctx, json, visibility.CatalogPlanID, visibility.PlatformBrokerName); err != nil { - logger.WithError(err).Errorf("Could not disable access for plan %s", visibility.CatalogPlanID) + if err := r.platformClient.Visibility().DisableAccessForPlan(ctx, &platform.ModifyPlanAccessRequest{ + BrokerName: visibility.PlatformBrokerName, + CatalogPlanID: visibility.CatalogPlanID, + Labels: mapToLabels(visibility.Labels), + }); err != nil { return err } - logger.Infof("Reconciliation task SUCCESSFULLY deleted visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) + logger.Infof("resyncJob SUCCESSFULLY deleted visibility for catalog plan %s with labels %v", visibility.CatalogPlanID, visibility.Labels) return nil } -func 
marshalVisibilityLabels(logger *logrus.Entry, labels map[string]string) ([]byte, error) { - json, err := json.Marshal(labels) - if err != nil { - logger.WithError(err).Error("Could not marshal labels to json") - } - return json, err -} - -func (r *ReconciliationTask) convertVisListToMap(list []*platform.ServiceVisibilityEntity) map[string]*platform.ServiceVisibilityEntity { - result := make(map[string]*platform.ServiceVisibilityEntity, len(list)) +func (r *resyncJob) convertVisListToMap(list []*platform.Visibility) map[string]*platform.Visibility { + result := make(map[string]*platform.Visibility, len(list)) for _, vis := range list { key := r.getVisibilityKey(vis) result[key] = vis @@ -459,3 +458,13 @@ type brokerPlanKey struct { brokerID string planID string } + +func mapToLabels(m map[string]string) types.Labels { + labels := types.Labels{} + for k, v := range m { + labels[k] = []string{ + v, + } + } + return labels +} diff --git a/pkg/sbproxy/reconcile/reconcile_visibilities_test.go b/pkg/sbproxy/reconcile/reconcile_visibilities_test.go index a8e902df..4685922f 100644 --- a/pkg/sbproxy/reconcile/reconcile_visibilities_test.go +++ b/pkg/sbproxy/reconcile/reconcile_visibilities_test.go @@ -18,8 +18,6 @@ package reconcile import ( "context" - "encoding/json" - "sync" "time" "github.com/Peripli/service-broker-proxy/pkg/platform" @@ -36,7 +34,6 @@ import ( var _ = Describe("Reconcile visibilities", func() { const fakeAppHost = "https://smproxy.com" - const brokerPrefix = "sm-proxy-" var ( fakeSMClient *smfakes.FakeClient @@ -48,9 +45,7 @@ var _ = Describe("Reconcile visibilities", func() { visibilityCache *cache.Cache - waitGroup *sync.WaitGroup - - reconciliationTask *ReconciliationTask + reconciler *Reconciler smbroker1 sm.Broker smbroker2 sm.Broker @@ -115,20 +110,41 @@ var _ = Describe("Reconcile visibilities", func() { return result, nil } - checkAccessArguments := func(data json.RawMessage, servicePlanGUID, platformBrokerName string, visibilities []*platform.ServiceVisibilityEntity) { - var labels map[string]string - err := json.Unmarshal(data, &labels) - Expect(err).To(Not(HaveOccurred())) - if labels == nil { - labels = map[string]string{} + flattenLabelsMap := func(labels types.Labels) []map[string]string { + m := make([]map[string]string, len(labels), len(labels)) + for k, values := range labels { + for i, value := range values { + if m[i] == nil { + m[i] = make(map[string]string) + } + m[i][k] = value + } } - visibility := &platform.ServiceVisibilityEntity{ - Public: len(labels) == 0, - CatalogPlanID: servicePlanGUID, - Labels: labels, - PlatformBrokerName: platformBrokerName, + + return m + } + + checkAccessArguments := func(data types.Labels, servicePlanGUID, platformBrokerName string, visibilities []*platform.Visibility) { + maps := flattenLabelsMap(data) + if len(maps) == 0 { + visibility := &platform.Visibility{ + Public: true, + CatalogPlanID: servicePlanGUID, + Labels: map[string]string{}, + PlatformBrokerName: platformBrokerName, + } + Expect(visibilities).To(ContainElement(visibility)) + } else { + for _, m := range maps { + visibility := &platform.Visibility{ + Public: false, + CatalogPlanID: servicePlanGUID, + Labels: m, + PlatformBrokerName: platformBrokerName, + } + Expect(visibilities).To(ContainElement(visibility)) + } } - Expect(visibilities).To(ContainElement(visibility)) } setFakeBrokersClients := func() { @@ -151,15 +167,14 @@ var _ = Describe("Reconcile visibilities", func() { fakeVisibilityClient = &platformfakes.FakeVisibilityClient{} visibilityCache = 
cache.New(5*time.Minute, 10*time.Minute) - waitGroup = &sync.WaitGroup{} fakePlatformClient.BrokerReturns(fakePlatformBrokerClient) fakePlatformClient.VisibilityReturns(fakeVisibilityClient) fakePlatformClient.CatalogFetcherReturns(fakePlatformCatalogFetcher) - reconciliationTask = NewTask( - context.TODO(), DefaultSettings(), waitGroup, fakePlatformClient, fakeSMClient, - fakeAppHost, visibilityCache) + reconciler = &Reconciler{ + Resyncer: NewResyncer(DefaultSettings(), fakePlatformClient, fakeSMClient, fakeAppHost, visibilityCache), + } smPlan1 = &types.ServicePlan{ Base: types.Base{ @@ -274,13 +289,13 @@ var _ = Describe("Reconcile visibilities", func() { platformbroker1 = platform.ServiceBroker{ GUID: "platformBrokerID1", - Name: brokerPrefix + "smBroker1", + Name: DefaultProxyBrokerPrefix + "smBroker1", BrokerURL: fakeAppHost + "/" + smbroker1.ID, } platformbroker2 = platform.ServiceBroker{ GUID: "platformBrokerID2", - Name: brokerPrefix + "smBroker2", + Name: DefaultProxyBrokerPrefix + "smBroker2", BrokerURL: fakeAppHost + "/" + smbroker2.ID, } @@ -292,26 +307,25 @@ var _ = Describe("Reconcile visibilities", func() { }) type expectations struct { - enablePlanVisibilityCalledFor []*platform.ServiceVisibilityEntity - disablePlanVisibilityCalledFor []*platform.ServiceVisibilityEntity + enablePlanVisibilityCalledFor []*platform.Visibility + disablePlanVisibilityCalledFor []*platform.Visibility } type testCase struct { stubs func() - platformVisibilities func() ([]*platform.ServiceVisibilityEntity, error) - smVisibilities func() ([]*types.Visibility, error) - smPlans func() ([]*types.ServicePlan, error) - smOfferings func() ([]*types.ServiceOffering, error) - convertedSMVisibilities func() []*platform.ServiceVisibilityEntity + platformVisibilities func() ([]*platform.Visibility, error) + smVisibilities func() ([]*types.Visibility, error) + smPlans func() ([]*types.ServicePlan, error) + smOfferings func() ([]*types.ServiceOffering, error) expectations func() expectations } entries := []TableEntry{ Entry("When no visibilities are present in platform and SM - should not enable access for plan", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{}, nil + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{}, nil }, smVisibilities: func() ([]*types.Visibility, error) { return []*types.Visibility{}, nil @@ -323,8 +337,8 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When no visibilities are present in platform and there are some in SM - should reconcile", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{}, nil + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{}, nil }, smVisibilities: func() ([]*types.Visibility, error) { return []*types.Visibility{ @@ -343,35 +357,35 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + enablePlanVisibilityCalledFor: []*platform.Visibility{ { - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, }, { - PlatformBrokerName: brokerPrefix + smbroker1.Name, + 
PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value1"}, }, }, - disablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{}, + disablePlanVisibilityCalledFor: []*platform.Visibility{}, } }, }), Entry("When visibilities in platform and in SM are the same - should do nothing", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value1"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -391,24 +405,24 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{}, - disablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{}, + enablePlanVisibilityCalledFor: []*platform.Visibility{}, + disablePlanVisibilityCalledFor: []*platform.Visibility{}, } }, }), Entry("When visibilities in platform and in SM are not the same - should reconcile", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value2"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value3"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -428,28 +442,28 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + enablePlanVisibilityCalledFor: []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value1"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, - disablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + disablePlanVisibilityCalledFor: []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value2"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value3"}, - 
PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, } @@ -460,8 +474,8 @@ var _ = Describe("Reconcile visibilities", func() { stubs: func() { fakeVisibilityClient.EnableAccessForPlanReturnsOnCall(0, errors.New("Expected")) }, - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{}, nil + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{}, nil }, smVisibilities: func() ([]*types.Visibility, error) { return []*types.Visibility{ @@ -488,16 +502,16 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + enablePlanVisibilityCalledFor: []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, Labels: map[string]string{"key": "value1"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, } @@ -508,17 +522,17 @@ var _ = Describe("Reconcile visibilities", func() { stubs: func() { fakeVisibilityClient.DisableAccessForPlanReturnsOnCall(0, errors.New("Expected")) }, - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, Labels: map[string]string{"key": "value1"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -528,16 +542,16 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - disablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + disablePlanVisibilityCalledFor: []*platform.Visibility{ { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{"key": "value0"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, { CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, Labels: map[string]string{"key": "value1"}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, } @@ -545,8 +559,8 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When visibility from SM doesn't have scope label and scope is enabled - should not enable visibility", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{}, nil + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{}, nil }, smVisibilities: func() ([]*types.Visibility, error) { return []*types.Visibility{ @@ -571,8 +585,8 @@ var _ = Describe("Reconcile 
visibilities", func() { stubs: func() { fakeVisibilityClient.VisibilityScopeLabelKeyReturns("") }, - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{}, nil + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{}, nil }, smVisibilities: func() ([]*types.Visibility, error) { return []*types.Visibility{ @@ -590,12 +604,12 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + enablePlanVisibilityCalledFor: []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, } @@ -603,12 +617,12 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When visibilities in platform and in SM are both public - should reconcile", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -622,20 +636,20 @@ var _ = Describe("Reconcile visibilities", func() { smPlans: stubGetSMPlans, expectations: func() expectations { return expectations{ - enablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + enablePlanVisibilityCalledFor: []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[0].CatalogID, Labels: map[string]string{}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, - disablePlanVisibilityCalledFor: []*platform.ServiceVisibilityEntity{ + disablePlanVisibilityCalledFor: []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, Labels: map[string]string{}, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, } @@ -643,12 +657,12 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When plans from SM could not be fetched - should not reconcile", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -667,12 +681,12 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When visibilities from SM cannot be fetched - no reconcilation is done", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { - return []*platform.ServiceVisibilityEntity{ + platformVisibilities: func() ([]*platform.Visibility, error) { + return []*platform.Visibility{ { Public: true, CatalogPlanID: smbroker1.ServiceOfferings[0].Plans[1].CatalogID, - PlatformBrokerName: brokerPrefix + smbroker1.Name, + 
PlatformBrokerName: DefaultProxyBrokerPrefix + smbroker1.Name, }, }, nil }, @@ -686,7 +700,7 @@ var _ = Describe("Reconcile visibilities", func() { }), Entry("When visibilities from platform cannot be fetched - no reconcilation is done", testCase{ - platformVisibilities: func() ([]*platform.ServiceVisibilityEntity, error) { + platformVisibilities: func() ([]*platform.Visibility, error) { return nil, errors.New("Expected") }, smVisibilities: func() ([]*types.Visibility, error) { @@ -703,7 +717,7 @@ var _ = Describe("Reconcile visibilities", func() { }), } - DescribeTable("Run", func(t testCase) { + DescribeTable("Resync", func(t testCase) { setFakeBrokersClients() fakeSMClient.GetVisibilitiesReturns(t.smVisibilities()) @@ -723,7 +737,7 @@ var _ = Describe("Reconcile visibilities", func() { t.stubs() } - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) Expect(fakeSMClient.GetBrokersCallCount()).To(Equal(1)) Expect(fakePlatformBrokerClient.GetBrokersCallCount()).To(Equal(1)) @@ -733,26 +747,26 @@ var _ = Describe("Reconcile visibilities", func() { Expect(fakeVisibilityClient.EnableAccessForPlanCallCount()).To(Equal(len(expected.enablePlanVisibilityCalledFor))) for index := range expected.enablePlanVisibilityCalledFor { - _, data, servicePlanGUID, platformBrokerName := fakeVisibilityClient.EnableAccessForPlanArgsForCall(index) - checkAccessArguments(data, servicePlanGUID, platformBrokerName, expected.enablePlanVisibilityCalledFor) + _, request := fakeVisibilityClient.EnableAccessForPlanArgsForCall(index) + checkAccessArguments(request.Labels, request.CatalogPlanID, request.BrokerName, expected.enablePlanVisibilityCalledFor) } Expect(fakeVisibilityClient.DisableAccessForPlanCallCount()).To(Equal(len(expected.disablePlanVisibilityCalledFor))) for index := range expected.disablePlanVisibilityCalledFor { - _, data, servicePlanGUID, platformBrokerName := fakeVisibilityClient.DisableAccessForPlanArgsForCall(index) - checkAccessArguments(data, servicePlanGUID, platformBrokerName, expected.disablePlanVisibilityCalledFor) + _, request := fakeVisibilityClient.DisableAccessForPlanArgsForCall(index) + checkAccessArguments(request.Labels, request.CatalogPlanID, request.BrokerName, expected.disablePlanVisibilityCalledFor) } }, entries...) 
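	// Editor's illustrative note (not part of the original patch): with the new visibility
	// client API the fakes record a single *platform.ModifyPlanAccessRequest per call instead
	// of the old (json.RawMessage, servicePlanGUID, platformBrokerName) argument triple, so a
	// hypothetical spot-check of a recorded call would look like:
	//
	//	_, req := fakeVisibilityClient.EnableAccessForPlanArgsForCall(0)
	//	Expect(req.BrokerName).To(HavePrefix(DefaultProxyBrokerPrefix))
	//	Expect(req.Labels).To(HaveKey("key"))
	//
	// req.CatalogPlanID carries what used to be passed as servicePlanGUID.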
- Describe("Run cache", func() { + Describe("Resync cache", func() { setVisibilityClients := func() { fakeSMClient.GetVisibilitiesReturns([]*types.Visibility{}, nil) fakeSMClient.GetPlansReturns(stubGetSMPlans()) - fakeVisibilityClient.GetVisibilitiesByBrokersReturns([]*platform.ServiceVisibilityEntity{}, nil) + fakeVisibilityClient.GetVisibilitiesByBrokersReturns([]*platform.Visibility{}, nil) fakeVisibilityClient.VisibilityScopeLabelKeyReturns("key") } @@ -771,14 +785,14 @@ var _ = Describe("Reconcile visibilities", func() { BeforeEach(func() { setFakes() - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(1, 1) }) Context("when visibility cache is invalid", func() { It("should call platform", func() { visibilityCache.Replace(platformVisibilityCacheKey, nil, time.Minute) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) @@ -789,7 +803,7 @@ var _ = Describe("Reconcile visibilities", func() { Expect(found).To(BeTrue()) visibilityCache.Set(platformVisibilityCacheKey, visibilities, time.Nanosecond) time.Sleep(time.Nanosecond) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) @@ -797,7 +811,7 @@ var _ = Describe("Reconcile visibilities", func() { Context("when plan cache is invalid", func() { It("should call platform", func() { visibilityCache.Replace(smPlansCacheKey, nil, time.Minute) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) @@ -808,14 +822,14 @@ var _ = Describe("Reconcile visibilities", func() { Expect(found).To(BeTrue()) visibilityCache.Set(smPlansCacheKey, plans, time.Nanosecond) time.Sleep(time.Nanosecond) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) Context("when there are no changes in SM plans", func() { It("should use cache", func() { - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 1) }) }) @@ -829,7 +843,7 @@ var _ = Describe("Reconcile visibilities", func() { fakeSMClient.GetPlansByServiceOfferingsReturns([]*types.ServicePlan{ smbroker1.ServiceOfferings[0].Plans[0], }, nil) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) @@ -845,7 +859,7 @@ var _ = Describe("Reconcile visibilities", func() { smbroker1.ServiceOfferings[1].Plans[0], }, nil) - reconciliationTask.Run() + reconciler.Resyncer.Resync(context.TODO()) assertCallCounts(2, 2) }) }) diff --git a/pkg/sbproxy/reconcile/reconciler.go b/pkg/sbproxy/reconcile/reconciler.go new file mode 100644 index 00000000..b03875e4 --- /dev/null +++ b/pkg/sbproxy/reconcile/reconciler.go @@ -0,0 +1,91 @@ +/* + * Copyright 2018 The Service Manager Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package reconcile + +import ( + "context" + "sync" + + "github.com/Peripli/service-manager/pkg/types" + + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications" + "github.com/Peripli/service-manager/pkg/log" +) + +const smBrokersStats = "sm_brokers" + +// Consumer provides functionality for consuming notifications +type Consumer interface { + Consume(ctx context.Context, notification *types.Notification) +} + +// Resyncer provides functionality for triggering a resync on the platform +type Resyncer interface { + Resync(ctx context.Context) +} + +// Reconciler takes care of propagating broker and visibility changes to the platform. +// TODO if the reg credentials are changed (the ones under cf.reg) we need to update the already registered brokers +type Reconciler struct { + Consumer Consumer + Resyncer Resyncer +} + +// Reconcile listens for notification messages and either consumes the notification or triggers a resync +func (r *Reconciler) Reconcile(ctx context.Context, messages <-chan *notifications.Message, group *sync.WaitGroup) { + group.Add(1) + go r.process(ctx, messages, group) +} + +// Process resync and notification messages sequentially in one goroutine +// to avoid concurrent changes in the platform +func (r *Reconciler) process(ctx context.Context, messages <-chan *notifications.Message, group *sync.WaitGroup) { + defer group.Done() + for { + select { + case <-ctx.Done(): + log.C(ctx).Info("Context cancelled. Terminating reconciler.") + return + case m, ok := <-messages: + if !ok { + log.C(ctx).Info("Messages channel closed. Terminating reconciler.") + return + } + log.C(ctx).Debugf("Reconciler received message %+v", m) + if m.Resync { + // discard any pending change notifications as we will do a full resync + drain(messages) + r.Resyncer.Resync(ctx) + } else { + r.Consumer.Consume(ctx, m.Notification) + } + } + } +} + +func drain(messages <-chan *notifications.Message) { + for { + select { + case _, ok := <-messages: + if !ok { + return + } + default: + return + } + } +} diff --git a/pkg/sbproxy/reconcile/reconciler_test.go b/pkg/sbproxy/reconcile/reconciler_test.go new file mode 100644 index 00000000..ff29baea --- /dev/null +++ b/pkg/sbproxy/reconcile/reconciler_test.go @@ -0,0 +1,174 @@ +/* + * Copyright 2018 The Service Manager Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reconcile_test + +import ( + "context" + "sync" + + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/reconcile" + + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications" + "github.com/Peripli/service-manager/pkg/types" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Reconciler", func() { + Describe("Process", func() { + var ( + wg *sync.WaitGroup + reconciler *reconcile.Reconciler + messages chan *notifications.Message + ctx context.Context + cancel context.CancelFunc + resyncer *fakeResyncer + consumer *fakeConsumer + ) + + BeforeEach(func() { + ctx, cancel = context.WithCancel(context.Background()) + wg = &sync.WaitGroup{} + resyncer = &fakeResyncer{} + consumer = &fakeConsumer{} + + reconciler = &reconcile.Reconciler{ + Resyncer: resyncer, + Consumer: consumer, + } + messages = make(chan *notifications.Message, 10) + }) + + Context("when the context is canceled", func() { + It("quits", func(done Done) { + reconciler.Reconcile(ctx, messages, wg) + cancel() + wg.Wait() + close(done) + }, 0.1) + }) + + Context("when the messages channel is closed", func() { + It("quits", func(done Done) { + reconciler.Reconcile(ctx, messages, wg) + close(messages) + wg.Wait() + close(done) + }, 0.1) + }) + + Context("when the messages channel is closed after resync", func() { + It("quits", func(done Done) { + messages <- ¬ifications.Message{Resync: true} + close(messages) + reconciler.Reconcile(ctx, messages, wg) + wg.Wait() + close(done) + }, 0.1) + }) + + Context("when notifications are sent", func() { + It("applies them in the same order", func(done Done) { + ns := []*types.Notification{ + { + Resource: "/v1/service_brokers", + Type: "CREATED", + }, + { + Resource: "/v1/service_brokers", + Type: "DELETED", + }, + } + for _, n := range ns { + messages <- ¬ifications.Message{Notification: n} + } + close(messages) + reconciler.Reconcile(ctx, messages, wg) + wg.Wait() + Expect(consumer.consumedNotifications).To(Equal(ns)) + close(done) + }) + }) + + Context("when resync is sent", func() { + It("drops all remaining messages in the queue and processes all new messages", func(done Done) { + nCreated := &types.Notification{ + Resource: "/v1/service_brokers", + Type: "CREATED", + } + nDeleted := &types.Notification{ + Resource: "/v1/service_brokers", + Type: "DELETED", + } + nModified := &types.Notification{ + Resource: "/v1/service_brokers", + Type: "MODIFIED", + } + messages <- ¬ifications.Message{Notification: nCreated} + messages <- ¬ifications.Message{Resync: true} + messages <- ¬ifications.Message{Notification: nDeleted} + messages <- ¬ifications.Message{Resync: true} + reconciler.Reconcile(ctx, messages, wg) + + Eventually(consumer.GetConsumedNotifications).Should(Equal([]*types.Notification{nCreated})) + Expect(resyncer.GetResyncCount()).To(Equal(1)) + + messages <- ¬ifications.Message{Notification: nModified} + close(messages) + wg.Wait() + Expect(consumer.GetConsumedNotifications()).To(Equal([]*types.Notification{nCreated, nModified})) + close(done) + }) + }) + }) +}) + +type fakeResyncer struct { + mutex sync.Mutex + resyncCount int +} + +func (r *fakeResyncer) Resync(ctx context.Context) { + r.mutex.Lock() + defer r.mutex.Unlock() + r.resyncCount++ +} + +func (r *fakeResyncer) GetResyncCount() int { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.resyncCount +} + +type fakeConsumer struct { + mutex sync.Mutex + consumedNotifications []*types.Notification +} + +func (c *fakeConsumer) Consume(ctx context.Context, notification *types.Notification) { + c.mutex.Lock() + defer c.mutex.Unlock() + c.consumedNotifications = append(c.consumedNotifications, notification) +} + +func (c *fakeConsumer) GetConsumedNotifications() []*types.Notification { + c.mutex.Lock() + defer c.mutex.Unlock() + + return 
c.consumedNotifications +} diff --git a/pkg/sbproxy/reconcile/resyncer.go b/pkg/sbproxy/reconcile/resyncer.go new file mode 100644 index 00000000..462bb7d7 --- /dev/null +++ b/pkg/sbproxy/reconcile/resyncer.go @@ -0,0 +1,60 @@ +package reconcile + +import ( + "context" + + "github.com/patrickmn/go-cache" + + "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/sm" + + "github.com/Peripli/service-manager/pkg/log" + "github.com/gofrs/uuid" + "github.com/pkg/errors" +) + +// NewResyncer returns a resyncer that reconciles the state of the proxy brokers and visibilities +// in the platform to match the desired state provided by the Service Manager. +func NewResyncer(settings *Settings, platformClient platform.Client, smClient sm.Client, proxyPath string, cache *cache.Cache) Resyncer { + return &resyncJob{ + options: settings, + platformClient: platformClient, + smClient: smClient, + proxyPath: proxyPath, + cache: cache, + stats: make(map[string]interface{}), + } +} + +type resyncJob struct { + options *Settings + platformClient platform.Client + smClient sm.Client + proxyPath string + cache *cache.Cache + stats map[string]interface{} +} + +// Resync reconciles the state of the proxy brokers and visibilities at the platform +// with the brokers provided by the Service Manager +func (r *resyncJob) Resync(ctx context.Context) { + resyncContext, taskID, err := createResyncContext(ctx) + if err != nil { + log.C(ctx).WithError(err).Error("could not create resync job context") + return + } + log.C(ctx).Infof("STARTING resync job %s...", taskID) + r.processBrokers(resyncContext) + r.processVisibilities(resyncContext) + + log.C(ctx).Infof("FINISHED resync job %s", taskID) +} + +func createResyncContext(ctx context.Context) (context.Context, string, error) { + correlationID, err := uuid.NewV4() + if err != nil { + return nil, "", errors.Wrap(err, "could not generate correlationID") + } + entry := log.C(ctx).WithField(log.FieldCorrelationID, correlationID.String()) + return log.ContextWithLogger(ctx, entry), correlationID.String(), nil +} diff --git a/pkg/sbproxy/sbproxy.go b/pkg/sbproxy/sbproxy.go index 7b7362e2..01fb5797 100644 --- a/pkg/sbproxy/sbproxy.go +++ b/pkg/sbproxy/sbproxy.go @@ -1,8 +1,32 @@ +/* + * Copyright 2019 The Service Manager Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package sbproxy import ( "sync" + "github.com/Peripli/service-manager/pkg/types" + + "github.com/patrickmn/go-cache" + + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications/handlers" + + "fmt" + "github.com/Peripli/service-broker-proxy/pkg/filter" "github.com/Peripli/service-broker-proxy/pkg/logging" "github.com/Peripli/service-manager/api/healthcheck" @@ -10,15 +34,13 @@ import ( "github.com/Peripli/service-manager/pkg/log" secfilters "github.com/Peripli/service-manager/pkg/security/filters" "github.com/Peripli/service-manager/pkg/util" - cache "github.com/patrickmn/go-cache" - - "fmt" "context" "time" "github.com/Peripli/service-broker-proxy/pkg/osb" "github.com/Peripli/service-broker-proxy/pkg/platform" + "github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications" "github.com/Peripli/service-broker-proxy/pkg/sbproxy/reconcile" "github.com/Peripli/service-broker-proxy/pkg/sm" "github.com/Peripli/service-manager/api/filters" @@ -26,7 +48,6 @@ import ( "github.com/Peripli/service-manager/pkg/env" "github.com/Peripli/service-manager/pkg/server" "github.com/Peripli/service-manager/pkg/web" - "github.com/robfig/cron" "github.com/spf13/pflag" ) @@ -47,20 +68,22 @@ const ( // controllers before running SMProxy. type SMProxyBuilder struct { *web.API - *cron.Cron - ctx context.Context - cfg *Settings - group *sync.WaitGroup + ctx context.Context + cfg *Settings + group *sync.WaitGroup + reconciler *reconcile.Reconciler + notificationsProducer *notifications.Producer } // SMProxy struct type SMProxy struct { *server.Server - scheduler *cron.Cron - ctx context.Context - group *sync.WaitGroup + ctx context.Context + group *sync.WaitGroup + reconciler *reconcile.Reconciler + notificationsProducer *notifications.Producer } // DefaultEnv creates a default environment that can be used to boot up a Service Broker proxy @@ -117,24 +140,38 @@ func New(ctx context.Context, cancel context.CancelFunc, env env.Environment, pl panic(err) } - var group sync.WaitGroup - c := cache.New(cfg.Reconcile.CacheExpiration, cacheCleanupInterval) - regJob := reconcile.NewTask(ctx, cfg.Reconcile, &group, platformClient, smClient, cfg.Reconcile.URL+APIPrefix, c) - - resyncSchedule := "@every " + cfg.Sm.ResyncPeriod.String() - log.C(ctx).Info("Brokers and Access resync schedule: ", resyncSchedule) - - cronScheduler := cron.New() - if err := cronScheduler.AddJob(resyncSchedule, regJob); err != nil { + notificationsProducer, err := notifications.NewProducer(cfg.Producer, cfg.Sm) + if err != nil { panic(err) } - + cache := cache.New(cfg.Reconcile.CacheExpiration, cacheCleanupInterval) + proxyPath := cfg.Reconcile.URL + APIPrefix + resyncer := reconcile.NewResyncer(cfg.Reconcile, platformClient, smClient, proxyPath, cache) + consumer := ¬ifications.Consumer{ + Handlers: map[types.ObjectType]notifications.ResourceNotificationHandler{ + types.ServiceBrokerType: &handlers.BrokerResourceNotificationsHandler{ + BrokerClient: platformClient.Broker(), + ProxyPrefix: cfg.Reconcile.BrokerPrefix, + ProxyPath: proxyPath, + }, + types.VisibilityType: &handlers.VisibilityResourceNotificationsHandler{ + VisibilityClient: platformClient.Visibility(), + ProxyPrefix: cfg.Reconcile.BrokerPrefix, + }, + }, + } + reconciler := &reconcile.Reconciler{ + Resyncer: resyncer, + Consumer: consumer, + } + var group sync.WaitGroup return &SMProxyBuilder{ - API: api, - Cron: cronScheduler, - ctx: ctx, - cfg: cfg, - group: &group, + API: api, + ctx: ctx, + cfg: cfg, + group: &group, + reconciler: reconciler, + 
notificationsProducer: notificationsProducer, } } @@ -146,10 +183,11 @@ func (smb *SMProxyBuilder) Build() *SMProxy { srv.Use(filters.NewRecoveryMiddleware()) return &SMProxy{ - Server: srv, - scheduler: smb.Cron, - ctx: smb.ctx, - group: smb.group, + Server: srv, + ctx: smb.ctx, + group: smb.group, + reconciler: smb.reconciler, + notificationsProducer: smb.notificationsProducer, } } @@ -162,12 +200,14 @@ func (smb *SMProxyBuilder) installHealth() { // Run starts the proxy func (p *SMProxy) Run() { defer waitWithTimeout(p.ctx, p.group, p.Server.Config.ShutdownTimeout) - p.scheduler.Start() - defer p.scheduler.Stop() + + messages := p.notificationsProducer.Start(p.ctx, p.group) + p.reconciler.Reconcile(p.ctx, messages, p.group) log.C(p.ctx).Info("Running SBProxy...") + p.Server.Run(p.ctx, p.group) - p.Server.Run(p.ctx) + p.group.Wait() } // waitWithTimeout waits for a WaitGroup to finish for a certain duration and times out afterwards @@ -180,7 +220,7 @@ func waitWithTimeout(ctx context.Context, group *sync.WaitGroup, timeout time.Du }() select { case <-c: - log.C(ctx).Debug(fmt.Sprintf("Timeout WaitGroup %+v finished successfully", group)) + log.C(ctx).Debugf("Timeout WaitGroup %+v finished successfully", group) case <-time.After(timeout): log.C(ctx).Fatal("Shutdown took more than ", timeout) close(c) diff --git a/pkg/sm/client_test.go b/pkg/sm/client_test.go index f2e019e8..916a60ff 100644 --- a/pkg/sm/client_test.go +++ b/pkg/sm/client_test.go @@ -59,14 +59,14 @@ var _ = Describe("Client", func() { BeforeEach(func() { settings = &Settings{ - User: "admin", - Password: "admin", - URL: "http://example.com", - OSBAPIPath: "/osb", - RequestTimeout: 5, - ResyncPeriod: 5, - SkipSSLValidation: false, - Transport: nil, + User: "admin", + Password: "admin", + URL: "http://example.com", + OSBAPIPath: "/osb", + NotificationsAPIPath: "/v1/notifications", + RequestTimeout: 5, + SkipSSLValidation: false, + Transport: nil, } }) @@ -113,14 +113,14 @@ var _ = Describe("Client", func() { newClient := func(t *testCase) *ServiceManagerClient { client, err := NewClient(&Settings{ - User: "admin", - Password: "admin", - URL: "http://example.com", - OSBAPIPath: "/osb", - RequestTimeout: 2 * time.Second, - ResyncPeriod: 5 * time.Second, - SkipSSLValidation: false, - Transport: httpClient(t.reaction, t.expectations), + User: "admin", + Password: "admin", + URL: "http://example.com", + OSBAPIPath: "/osb", + NotificationsAPIPath: "/v1/notifications", + RequestTimeout: 2 * time.Second, + SkipSSLValidation: false, + Transport: httpClient(t.reaction, t.expectations), }) Expect(err).ShouldNot(HaveOccurred()) return client diff --git a/pkg/sm/options.go b/pkg/sm/options.go index aca7e54f..85ed8106 100644 --- a/pkg/sm/options.go +++ b/pkg/sm/options.go @@ -19,22 +19,23 @@ package sm import ( "time" + "net/http" + "github.com/Peripli/service-manager/pkg/env" "github.com/pkg/errors" - "net/http" ) // DefaultSettings builds a default Service Manager Settings func DefaultSettings() *Settings { return &Settings{ - User: "", - Password: "", - URL: "", - OSBAPIPath: "", - RequestTimeout: 5 * time.Second, - ResyncPeriod: 5 * time.Minute, - SkipSSLValidation: false, - Transport: nil, + User: "", + Password: "", + URL: "", + OSBAPIPath: "/v1/osb", + NotificationsAPIPath: "/v1/notifications", + RequestTimeout: 5 * time.Second, + SkipSSLValidation: false, + Transport: nil, } } @@ -53,13 +54,13 @@ func NewSettings(env env.Environment) (*Settings, error) { // Settings type holds SM Client config properties type Settings struct { - 
User              string
-	Password          string
-	URL               string
-	OSBAPIPath        string        `mapstructure:"osb_api_path"`
-	RequestTimeout    time.Duration `mapstructure:"request_timeout"`
-	ResyncPeriod      time.Duration `mapstructure:"resync_period"`
-	SkipSSLValidation bool          `mapstructure:"skip_ssl_validation"`
+	User                 string
+	Password             string
+	URL                  string
+	OSBAPIPath           string        `mapstructure:"osb_api_path"`
+	NotificationsAPIPath string        `mapstructure:"notifications_api_path"`
+	RequestTimeout       time.Duration `mapstructure:"request_timeout"`
+	SkipSSLValidation    bool          `mapstructure:"skip_ssl_validation"`

 	Transport http.RoundTripper
 }
@@ -78,10 +79,10 @@ func (c *Settings) Validate() error {
 	if len(c.OSBAPIPath) == 0 {
 		return errors.New("SM configuration OSB API Path missing")
 	}
-	if c.RequestTimeout == 0 {
-		return errors.New("SM configuration RequestTimeout missing")
+	if c.NotificationsAPIPath == "" {
+		return errors.New("SM configuration Notifications API path must be a non-empty string")
 	}
-	if c.ResyncPeriod == 0 {
+	if c.RequestTimeout == 0 {
 		return errors.New("SM configuration RequestTimeout missing")
 	}
 	return nil
diff --git a/pkg/sm/options_test.go b/pkg/sm/options_test.go
index f9f79a6d..ed9afcd7 100644
--- a/pkg/sm/options_test.go
+++ b/pkg/sm/options_test.go
@@ -18,11 +18,12 @@ package sm

 import (
 	"fmt"
+	"time"
+
 	"github.com/Peripli/service-manager/pkg/env/envfakes"
 	"github.com/fatih/structs"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	"time"
 )

 var _ = Describe("Config", func() {
@@ -67,7 +68,6 @@ var _ = Describe("Config", func() {
 			Password:          "admin",
 			OSBAPIPath:        "/osb",
 			RequestTimeout:    5 * time.Second,
-			ResyncPeriod:      5 * time.Minute,
 			SkipSSLValidation: true,
 			Transport:         nil,
 		}
@@ -158,9 +158,9 @@ var _ = Describe("Config", func() {
 		})
 	})

-	Context("when resync period is missing", func() {
-		It("returns an error", func() {
-			config.ResyncPeriod = 0
+	Context("when NotificationsAPIPath is empty", func() {
+		It("returns an error", func() {
+			config.NotificationsAPIPath = ""
 			assertErrorDuringValidate()
 		})
 	})
diff --git a/pkg/sm/types.go b/pkg/sm/types.go
index 002aa53c..2ab87623 100644
--- a/pkg/sm/types.go
+++ b/pkg/sm/types.go
@@ -29,10 +29,9 @@ type Brokers struct {

 // Broker type used for responses from the Service Manager client
 type Broker struct {
-	ID        string `json:"id"`
-	Name      string `json:"name"`
-	BrokerURL string `json:"broker_url"`
-
+	ID               string `json:"id"`
+	Name             string `json:"name"`
+	BrokerURL        string `json:"broker_url"`
 	ServiceOfferings []types.ServiceOffering    `json:"services"`
 	Metadata         map[string]json.RawMessage `json:"metadata,omitempty"`
 }
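Editor's note: the sketch below is not part of the patch. It is a minimal Go wiring example that mirrors the constructor logic added in pkg/sbproxy/sbproxy.go, showing how the new Resyncer, Consumer and Reconciler fit together. The platformClient, smClient, reconcile settings, proxy path and the messages channel (normally obtained from notifications.Producer.Start) are assumed to be already configured, and the cache durations are placeholders.

package wiring

import (
	"context"
	"sync"
	"time"

	"github.com/patrickmn/go-cache"

	"github.com/Peripli/service-broker-proxy/pkg/platform"
	"github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications"
	"github.com/Peripli/service-broker-proxy/pkg/sbproxy/notifications/handlers"
	"github.com/Peripli/service-broker-proxy/pkg/sbproxy/reconcile"
	"github.com/Peripli/service-broker-proxy/pkg/sm"
	"github.com/Peripli/service-manager/pkg/types"
)

// wireReconciler builds a Reconciler the same way sbproxy.New does: a Resyncer for
// full resyncs and a Consumer with one handler per notification resource type.
// The returned WaitGroup can be waited on during shutdown.
func wireReconciler(ctx context.Context, platformClient platform.Client, smClient sm.Client,
	settings *reconcile.Settings, proxyPath string, messages <-chan *notifications.Message) *sync.WaitGroup {

	// Placeholder cache durations; sbproxy.New takes them from the reconcile settings.
	visibilityCache := cache.New(30*time.Minute, time.Minute)

	reconciler := &reconcile.Reconciler{
		Resyncer: reconcile.NewResyncer(settings, platformClient, smClient, proxyPath, visibilityCache),
		Consumer: &notifications.Consumer{
			Handlers: map[types.ObjectType]notifications.ResourceNotificationHandler{
				types.ServiceBrokerType: &handlers.BrokerResourceNotificationsHandler{
					BrokerClient: platformClient.Broker(),
					ProxyPrefix:  settings.BrokerPrefix,
					ProxyPath:    proxyPath,
				},
				types.VisibilityType: &handlers.VisibilityResourceNotificationsHandler{
					VisibilityClient: platformClient.Visibility(),
					ProxyPrefix:      settings.BrokerPrefix,
				},
			},
		},
	}

	var group sync.WaitGroup
	// Reconcile starts a single goroutine that handles notification and resync
	// messages sequentially, so the platform is never modified concurrently.
	reconciler.Reconcile(ctx, messages, &group)
	return &group
}

Keeping both the resync path and the notification path behind that single Reconciler goroutine is what lets the patch drop the cron scheduler: ordering is preserved, and a pending resync simply drains any queued notifications before running.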