diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d334b2aca4..827a3b51b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: go-build run: go build "./..." @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - run: go install github.com/google/addlicense@latest - run: addlicense -check -f licenses/addlicense.tmpl . @@ -49,13 +49,13 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 cache: false - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.55.0 - args: --config=.golangci-strict.yml --timeout=3m + version: v1.55.2 + args: --config=.golangci-strict.yml test: runs-on: ${{ matrix.os }} @@ -77,7 +77,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Build run: go build -v "./..." - name: Run Tests @@ -97,7 +97,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Run Tests run: go test -race -timeout=30m -count=1 -json -v "./..." | tee test.json | jq -s -jr 'sort_by(.Package,.Time) | .[].Output | select (. != null )' shell: bash @@ -137,7 +137,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Build run: go build -v "./tools/stats-definition-exporter" - name: Run Tests diff --git a/.gitignore b/.gitignore index 41723989c2..1d36337c0c 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ __pycache__ ### Couchbase Plugin ### .cbcache/ + +planPIndexes/ diff --git a/.golangci-strict.yml b/.golangci-strict.yml index ea0427391b..2a28db4557 100644 --- a/.golangci-strict.yml +++ b/.golangci-strict.yml @@ -8,6 +8,9 @@ # config file for golangci-lint +run: + timeout: 3m + linters: enable: #- bodyclose # checks whether HTTP response body is closed successfully @@ -24,7 +27,7 @@ linters: #- nakedret # Finds naked returns in functions greater than a specified function length #- prealloc # Finds slice declarations that could potentially be preallocated #- revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes - #- staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - unconvert # Remove unnecessary type conversions #- unparam # Reports unused function parameters @@ -67,7 +70,6 @@ linters: - nakedret # Finds naked returns in functions greater than a specified function length - prealloc # Finds slice declarations that could potentially be preallocated - revive # Golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes - - staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks - structcheck # Finds unused struct fields - unparam # Reports unused function parameters - varcheck # Finds unused global variables and constants diff --git a/.golangci.yml b/.golangci.yml index b0cf702de9..f02eb91185 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,6 +8,9 @@ # config file for golangci-lint +run: + timeout: 3m + linters: enable: - bodyclose # checks whether HTTP response body is closed successfully diff --git a/Jenkinsfile b/Jenkinsfile index 491f8dacf5..58d4f0da3c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,7 +16,7 @@ pipeline { } tools { - go '1.21.3' + go '1.21.4' } stages { diff --git a/auth/auth_test.go b/auth/auth_test.go index b69c35ce9c..b34d0f7c13 100644 --- a/auth/auth_test.go +++ b/auth/auth_test.go @@ -721,7 +721,7 @@ func TestConcurrentUserWrites(t *testing.T) { } // Retrieve user to trigger initial calculation of roles, channels - user, getErr := auth.GetUser(username) + _, getErr := auth.GetUser(username) require.NoError(t, getErr, "Error retrieving user") require.NoError(t, auth.SetBcryptCost(DefaultBcryptCost)) diff --git a/auth/oidc_test.go b/auth/oidc_test.go index 7c95030262..1d5930fa7a 100644 --- a/auth/oidc_test.go +++ b/auth/oidc_test.go @@ -1205,9 +1205,8 @@ func TestJWTRolesChannels(t *testing.T) { for i, login := range tc.logins { var ( - user User - err error - lastUpdateTime time.Time + user User + err error ) if i == 0 { user, err = auth.NewUser(testUserPrefix+"_"+testSubject, "test", base.SetFromArray(login.explicitChannels)) @@ -1254,8 +1253,6 @@ func TestJWTRolesChannels(t *testing.T) { } require.Equal(t, base.SetFromArray(login.expectedChannels), user.Channels().AsSet()) - require.Greater(t, user.JWTLastUpdated(), lastUpdateTime) - lastUpdateTime = user.JWTLastUpdated() } }) } diff --git a/auth/password_hash_test.go b/auth/password_hash_test.go index 0dacc1a04c..1180402c97 100644 --- a/auth/password_hash_test.go +++ b/auth/password_hash_test.go @@ -35,27 +35,30 @@ func BenchmarkBcryptCostTimes(b *testing.B) { for i := minCostToTest; i < maxCostToTest; i++ { b.Run(fmt.Sprintf("cost%d", i), func(bn *testing.B) { - bn.N = 1 _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), i) assert.NoError(bn, err) }) } } -// TestBcryptDefaultCostTime will ensure that the default bcrypt cost takes at least a 'reasonable' amount of time -// If this test fails, it suggests maybe we need to think about increasing the default cost... -func TestBcryptDefaultCostTime(t *testing.T) { - // Modest 2.2GHz macbook i7 takes ~80ms at cost 10 - // Assume server CPUs are ~2x faster - minimumDuration := 40 * time.Millisecond +// TestBcryptCostTimes will output the time it takes to hash a password with each bcrypt cost value +func TestBcryptCostTimes(t *testing.T) { + // Little value in running this regularly. 
Might be useful for one-off informational purposes + t.Skip("Test disabled") - startTime := time.Now() - _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), DefaultBcryptCost) - duration := time.Since(startTime) + minCostToTest := bcrypt.DefaultCost + maxCostToTest := bcrypt.DefaultCost + 5 - t.Logf("bcrypt.GenerateFromPassword with cost %d took: %v", DefaultBcryptCost, duration) - assert.NoError(t, err) - assert.True(t, minimumDuration < duration) + for i := minCostToTest; i < maxCostToTest; i++ { + t.Run(fmt.Sprintf("cost%d", i), func(t *testing.T) { + startTime := time.Now() + _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), i) + duration := time.Since(startTime) + + t.Logf("bcrypt.GenerateFromPassword with cost %d took: %v", i, duration) + assert.NoError(t, err) + }) + } } func TestSetBcryptCost(t *testing.T) { diff --git a/base/collection.go b/base/collection.go index fee0fcb8ef..35511d841d 100644 --- a/base/collection.go +++ b/base/collection.go @@ -63,10 +63,6 @@ func GetGoCBv2Bucket(ctx context.Context, spec BucketSpec) (*GocbV2Bucket, error RetryStrategy: gocb.NewBestEffortRetryStrategy(nil), } - if spec.KvPoolSize > 0 { - // TODO: Equivalent of kvPoolSize in gocb v2? - } - cluster, err := gocb.Connect(connString, clusterOptions) if err != nil { InfofCtx(ctx, KeyAuth, "Unable to connect to cluster: %v", err) diff --git a/base/dcp_sharded.go b/base/dcp_sharded.go index 69acebd035..bdd915eb4f 100644 --- a/base/dcp_sharded.go +++ b/base/dcp_sharded.go @@ -28,7 +28,7 @@ const DefaultImportPartitions = 16 const DefaultImportPartitionsServerless = 6 // firstVersionToSupportCollections represents the earliest Sync Gateway release that supports collections. -var firstVersionToSupportCollections = &ComparableVersion{ +var firstVersionToSupportCollections = &ComparableBuildVersion{ epoch: 0, major: 3, minor: 1, @@ -38,7 +38,7 @@ var firstVersionToSupportCollections = &ComparableVersion{ // nodeExtras is the contents of the JSON value of the cbgt.NodeDef.Extras field as used by Sync Gateway. type nodeExtras struct { // Version is the node's version. - Version *ComparableVersion `json:"v"` + Version *ComparableBuildVersion `json:"v"` } // CbgtContext holds the two handles we have for CBGT-related functionality. @@ -376,7 +376,7 @@ func (c *CbgtContext) StartManager(ctx context.Context, dbName string, configGro // getNodeVersion returns the version of the node from its Extras field, or nil if none is stored. Returns an error if // the extras could not be parsed. -func getNodeVersion(def *cbgt.NodeDef) (*ComparableVersion, error) { +func getNodeVersion(def *cbgt.NodeDef) (*ComparableBuildVersion, error) { if len(def.Extras) == 0 { return nil, nil } @@ -388,7 +388,7 @@ func getNodeVersion(def *cbgt.NodeDef) (*ComparableVersion, error) { } // getMinNodeVersion returns the version of the oldest node currently in the cluster. -func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableVersion, error) { +func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableBuildVersion, error) { nodes, _, err := cbgt.CfgGetNodeDefs(cfg, cbgt.NODE_DEFS_KNOWN) if err != nil { return nil, err @@ -397,14 +397,14 @@ func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableVersion, error) { // If there are no nodes at all, it's likely we're the first node in the cluster. 
return ProductVersion, nil } - var minVersion *ComparableVersion + var minVersion *ComparableBuildVersion for _, node := range nodes.NodeDefs { nodeVersion, err := getNodeVersion(node) if err != nil { return nil, fmt.Errorf("failed to get version of node %v: %w", MD(node.HostPort).Redact(), err) } if nodeVersion == nil { - nodeVersion = zeroComparableVersion() + nodeVersion = zeroComparableBuildVersion() } if minVersion == nil || nodeVersion.Less(minVersion) { minVersion = nodeVersion @@ -678,11 +678,11 @@ func (meh *sgMgrEventHandlers) OnUnregisterPIndex(pindex *cbgt.PIndex) { // OnFeedError is required to trigger reconnection to a feed on a closed connection (EOF). // NotifyMgrOnClose will trigger cbgt closing and then attempt to reconnect to the feed, if the manager hasn't // been stopped. -func (meh *sgMgrEventHandlers) OnFeedError(srcType string, r cbgt.Feed, feedErr error) { +func (meh *sgMgrEventHandlers) OnFeedError(_ string, r cbgt.Feed, feedErr error) { // cbgt always passes srcType = SOURCE_GOCBCORE, but we have a wrapped type associated with our indexes - use that instead // for our logging - srcType = SOURCE_DCP_SG + srcType := SOURCE_DCP_SG var bucketName, bucketUUID string dcpFeed, ok := r.(cbgt.FeedEx) if ok { diff --git a/base/leaky_bucket.go b/base/leaky_bucket.go index 7a9ddb921c..988f86be3b 100644 --- a/base/leaky_bucket.go +++ b/base/leaky_bucket.go @@ -58,9 +58,9 @@ func (b *LeakyBucket) SetIgnoreClose(value bool) { b.config.IgnoreClose = value } -func (b *LeakyBucket) CloseAndDelete() error { +func (b *LeakyBucket) CloseAndDelete(ctx context.Context) error { if bucket, ok := b.bucket.(sgbucket.DeleteableStore); ok { - return bucket.CloseAndDelete() + return bucket.CloseAndDelete(ctx) } return nil } @@ -358,8 +358,7 @@ func dedupeTapEvents(tapEvents []sgbucket.FeedEvent) []sgbucket.FeedEvent { // sequence order as read off the feed. 
deduped := []sgbucket.FeedEvent{} for _, tapEvent := range tapEvents { - key := string(tapEvent.Key) - latestTapEventForKey := latestTapEventPerKey[key] + latestTapEventForKey := latestTapEventPerKey[string(tapEvent.Key)] if tapEvent.Cas == latestTapEventForKey.Cas { deduped = append(deduped, tapEvent) } diff --git a/base/logger_external.go b/base/logger_external.go index 1589f0128f..1130ee917d 100644 --- a/base/logger_external.go +++ b/base/logger_external.go @@ -46,7 +46,8 @@ func initExternalLoggers() { } func updateExternalLoggers() { - if consoleLogger != nil && consoleLogger.shouldLog(nil, LevelDebug, KeyWalrus) { + // use context.Background() since this is called from init or to reset test logging + if consoleLogger != nil && consoleLogger.shouldLog(context.Background(), LevelDebug, KeyWalrus) { rosmar.SetLogLevel(rosmar.LevelDebug) } else { rosmar.SetLogLevel(rosmar.LevelInfo) diff --git a/base/logging_test.go b/base/logging_test.go index c88684a6c7..fd50db33a0 100644 --- a/base/logging_test.go +++ b/base/logging_test.go @@ -11,7 +11,6 @@ package base import ( "bytes" "fmt" - "math/rand" "os" "path/filepath" "runtime" @@ -79,9 +78,7 @@ func BenchmarkLogRotation(b *testing.B) { for _, test := range tests { b.Run(fmt.Sprintf("rotate:%t-compress:%t-bytes:%v", test.rotate, test.compress, test.numBytes), func(bm *testing.B) { - data := make([]byte, test.numBytes) - _, err := rand.Read(data) - require.NoError(bm, err) + data := FastRandBytes(bm, test.numBytes) logPath := b.TempDir() logger := lumberjack.Logger{Filename: filepath.Join(logPath, "output.log"), Compress: test.compress} @@ -99,7 +96,7 @@ func BenchmarkLogRotation(b *testing.B) { // we can't remove temp dir while the async compression is still writing log files assert.NoError(bm, logger.Close()) ctx := TestCtx(bm) - err, _ = RetryLoop(ctx, "benchmark-logrotate-teardown", + err, _ := RetryLoop(ctx, "benchmark-logrotate-teardown", func() (shouldRetry bool, err error, value interface{}) { err = os.RemoveAll(logPath) return err != nil, err, nil diff --git a/base/main_test_bucket_pool.go b/base/main_test_bucket_pool.go index 4789bc5f52..8c2d80d37d 100644 --- a/base/main_test_bucket_pool.go +++ b/base/main_test_bucket_pool.go @@ -198,29 +198,17 @@ func (tbp *TestBucketPool) GetWalrusTestBucket(t testing.TB, url string) (b Buck require.NoError(t, err) var walrusBucket *rosmar.Bucket - var typeName string + const typeName = "rosmar" bucketName := tbpBucketNamePrefix + "rosmar_" + id if url == "walrus:" || url == rosmar.InMemoryURL { - walrusBucket, err = rosmar.OpenBucket(url, rosmar.CreateOrOpen) - if err == nil { - err := walrusBucket.SetName(bucketName) - if err != nil { - tbp.Fatalf(testCtx, "Could not set name %s for rosmar bucket: %s", bucketName, err) - } - } + walrusBucket, err = rosmar.OpenBucket(url, bucketName, rosmar.CreateOrOpen) } else { walrusBucket, err = rosmar.OpenBucketIn(url, bucketName, rosmar.CreateOrOpen) } - typeName = "rosmar" if err != nil { tbp.Fatalf(testCtx, "couldn't get %s bucket from <%s>: %v", typeName, url, err) } - err = walrusBucket.SetName(bucketName) - if err != nil { - tbp.Fatalf(testCtx, "Could not set name %s for rosmar bucket: %s", bucketName, err) - } - // Wrap Walrus buckets with a leaky bucket to support vbucket IDs on feed. 
b = &LeakyBucket{bucket: walrusBucket, config: &LeakyBucketConfig{TapFeedVbuckets: true}} @@ -258,14 +246,10 @@ func (tbp *TestBucketPool) GetWalrusTestBucket(t testing.TB, url string) (b Buck atomic.AddInt32(&tbp.stats.NumBucketsClosed, 1) atomic.AddInt64(&tbp.stats.TotalInuseBucketNano, time.Since(openedStart).Nanoseconds()) tbp.markBucketClosed(t, b) - if url == kTestWalrusURL { - b.Close(ctx) - } else { - // Persisted buckets should call close and delete - closeErr := walrusBucket.CloseAndDelete() - if closeErr != nil { - tbp.Logf(ctx, "Unexpected error closing persistent %s bucket: %v", typeName, closeErr) - } + // Persisted buckets should call close and delete + closeErr := walrusBucket.CloseAndDelete(ctx) + if closeErr != nil { + tbp.Logf(ctx, "Unexpected error closing persistent %s bucket: %v", typeName, closeErr) } } diff --git a/base/util_testing.go b/base/util_testing.go index bdae598540..5b76c94df1 100644 --- a/base/util_testing.go +++ b/base/util_testing.go @@ -42,12 +42,6 @@ import ( var TestExternalRevStorage = false -func init() { - - // Prevent https://issues.couchbase.com/browse/MB-24237 - rand.Seed(time.Now().UTC().UnixNano()) -} - type TestBucket struct { Bucket BucketSpec BucketSpec @@ -108,12 +102,6 @@ func GetTestBucket(t testing.TB) *TestBucket { return getTestBucket(t, false) } -// GetTestBucket returns a test bucket from a pool. If running with walrus buckets, will persist bucket data -// across bucket close. -func GetPersistentTestBucket(t testing.TB) *TestBucket { - return getTestBucket(t, true) -} - // getTestBucket returns a bucket from the bucket pool. Persistent flag determines behaviour for walrus // buckets only - Couchbase bucket behaviour is defined by the bucket pool readier/init. func getTestBucket(t testing.TB, persistent bool) *TestBucket { @@ -210,29 +198,6 @@ func rosmarUriFromPath(path string) string { return uri + strings.ReplaceAll(path, `\`, `/`) } -// Gets a Walrus bucket which will be persisted to a temporary directory -// Returns both the test bucket which is persisted and a function which can be used to remove the created temporary -// directory once the test has finished with it. -func GetPersistentWalrusBucket(t testing.TB) (*TestBucket, func()) { - tempDir, err := os.MkdirTemp("", "walrustemp") - require.NoError(t, err) - - bucket, spec, closeFn := GTestBucketPool.GetWalrusTestBucket(t, rosmarUriFromPath(tempDir)) - - // Return this separate to closeFn as we want to avoid this being removed on database close (/_offline handling) - removeFileFunc := func() { - err := os.RemoveAll(tempDir) - require.NoError(t, err) - } - - return &TestBucket{ - Bucket: bucket, - BucketSpec: spec, - closeFn: closeFn, - t: t, - }, removeFileFunc -} - // Should Sync Gateway use XATTRS functionality when running unit tests? func TestUseXattrs() bool { useXattrs, isSet := os.LookupEnv(TestEnvSyncGatewayUseXattrs) @@ -929,3 +894,12 @@ func MoveDocument(t testing.TB, docID string, dst, src DataStore) { _, err = src.Remove(docID, srcCAS) require.NoError(t, err) } + +// FastRandBytes returns a set of random bytes. Uses a low quality random generator. 
+func FastRandBytes(t testing.TB, size int) []byte { + b := make([]byte, size) + // staticcheck wants to use crypto/rand as math/rand is deprecated in go 1.20, but we don't need that for testing + _, err := rand.Read(b) // nolint:staticcheck + require.NoError(t, err) + return b +} diff --git a/base/version.go b/base/version.go index aff28382c2..531d30f92d 100644 --- a/base/version.go +++ b/base/version.go @@ -27,7 +27,7 @@ const ( // populated via init() below var ( // ProductVersion describes the specific version information of the build. - ProductVersion *ComparableVersion + ProductVersion *ComparableBuildVersion // VersionString appears in the "Server:" header of HTTP responses. // CBL 1.x parses the header to determine whether it's talking to Sync Gateway (vs. CouchDB) and what version. @@ -109,7 +109,7 @@ func init() { editionStr = productEditionShortName var err error - ProductVersion, err = NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) + ProductVersion, err = NewComparableBuildVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) if err != nil { panic(err) } diff --git a/base/version_comparable.go b/base/version_comparable_build.go similarity index 63% rename from base/version_comparable.go rename to base/version_comparable_build.go index 27f212382d..541761d52d 100644 --- a/base/version_comparable.go +++ b/base/version_comparable_build.go @@ -15,22 +15,22 @@ import ( ) const ( - // comparableVersionEpoch can be incremented when the versioning system or string format changes, whilst maintaining ordering. + // comparableBuildVersionEpoch can be incremented when the versioning system or string format changes, whilst maintaining ordering. // i.e. It's a version number version // e.g: version system change from semver to dates: 0:30.2.1@45-EE < 1:22-3-25@33-EE - comparableVersionEpoch = 0 + comparableBuildVersionEpoch = 0 ) -// ComparableVersion is an [epoch:]major.minor.patch[.other][@build][-edition] version that has methods to reliably extract information. -type ComparableVersion struct { +// ComparableBuildVersion is an [epoch:]major.minor.patch[.other][@build][-edition] version that has methods to reliably extract information. +type ComparableBuildVersion struct { epoch, major, minor, patch, other uint8 build uint16 edition productEdition str string } -func zeroComparableVersion() *ComparableVersion { - v := &ComparableVersion{ +func zeroComparableBuildVersion() *ComparableBuildVersion { + v := &ComparableBuildVersion{ epoch: 0, major: 0, minor: 0, @@ -39,18 +39,18 @@ func zeroComparableVersion() *ComparableVersion { build: 0, edition: "", } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() return v } -// NewComparableVersionFromString parses a ComparableVersion from the given version string. +// NewComparableBuildVersionFromString parses a ComparableBuildVersion from the given version string. 
// Expected format: `[epoch:]major.minor.patch[.other][@build][-edition]` -func NewComparableVersionFromString(version string) (*ComparableVersion, error) { - epoch, major, minor, patch, other, build, edition, err := parseComparableVersion(version) +func NewComparableBuildVersionFromString(version string) (*ComparableBuildVersion, error) { + epoch, major, minor, patch, other, build, edition, err := parseComparableBuildVersion(version) if err != nil { return nil, err } - v := &ComparableVersion{ + v := &ComparableBuildVersion{ epoch: epoch, major: major, minor: minor, @@ -59,20 +59,20 @@ func NewComparableVersionFromString(version string) (*ComparableVersion, error) build: build, edition: edition, } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() if v.str != version { return nil, fmt.Errorf("version string %q is not equal to formatted version string %q", version, v.str) } return v, nil } -func NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (*ComparableVersion, error) { - _, major, minor, patch, other, build, edition, err := parseComparableVersionComponents("", majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) +func NewComparableBuildVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (*ComparableBuildVersion, error) { + _, major, minor, patch, other, build, edition, err := parseComparableBuildVersionComponents("", majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) if err != nil { return nil, err } - v := &ComparableVersion{ - epoch: comparableVersionEpoch, + v := &ComparableBuildVersion{ + epoch: comparableBuildVersionEpoch, major: major, minor: minor, patch: patch, @@ -80,12 +80,12 @@ func NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, edit build: build, edition: edition, } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() return v, nil } // Equal returns true if pv is equal to b -func (pv *ComparableVersion) Equal(b *ComparableVersion) bool { +func (pv *ComparableBuildVersion) Equal(b *ComparableBuildVersion) bool { return pv.epoch == b.epoch && pv.major == b.major && pv.minor == b.minor && @@ -96,7 +96,7 @@ func (pv *ComparableVersion) Equal(b *ComparableVersion) bool { } // Less returns true if a is less than b -func (a *ComparableVersion) Less(b *ComparableVersion) bool { +func (a *ComparableBuildVersion) Less(b *ComparableBuildVersion) bool { if a.epoch < b.epoch { return true } else if a.epoch > b.epoch { @@ -138,7 +138,7 @@ func (a *ComparableVersion) Less(b *ComparableVersion) bool { } // AtLeastMinorDowngrade returns true there is a major or minor downgrade from a to b. -func (a *ComparableVersion) AtLeastMinorDowngrade(b *ComparableVersion) bool { +func (a *ComparableBuildVersion) AtLeastMinorDowngrade(b *ComparableBuildVersion) bool { if a.epoch != b.epoch { return a.epoch > b.epoch } @@ -148,82 +148,82 @@ func (a *ComparableVersion) AtLeastMinorDowngrade(b *ComparableVersion) bool { return a.minor > b.minor } -func (pv ComparableVersion) String() string { +func (pv ComparableBuildVersion) String() string { return pv.str } -// MarshalJSON implements json.Marshaler for ComparableVersion. The JSON representation is the version string. -func (pv *ComparableVersion) MarshalJSON() ([]byte, error) { +// MarshalJSON implements json.Marshaler for ComparableBuildVersion. The JSON representation is the version string. 
+func (pv *ComparableBuildVersion) MarshalJSON() ([]byte, error) { return JSONMarshal(pv.String()) } -func (pv *ComparableVersion) UnmarshalJSON(val []byte) error { +func (pv *ComparableBuildVersion) UnmarshalJSON(val []byte) error { var strVal string err := JSONUnmarshal(val, &strVal) if err != nil { return err } if strVal != "" { - pv.epoch, pv.major, pv.minor, pv.patch, pv.other, pv.build, pv.edition, err = parseComparableVersion(strVal) + pv.epoch, pv.major, pv.minor, pv.patch, pv.other, pv.build, pv.edition, err = parseComparableBuildVersion(strVal) } - pv.str = pv.formatComparableVersion() + pv.str = pv.formatComparableBuildVersion() return err } const ( - comparableVersionSep = '.' - comparableVersionSepEpoch = ':' - comparableVersionSepBuild = '@' - comparableVersionSepEdition = '-' + comparableBuildVersionSep = '.' + comparableBuildVersionSepEpoch = ':' + comparableBuildVersionSepBuild = '@' + comparableBuildVersionSepEdition = '-' ) -// formatComparableVersion returns the string representation of the given version. +// formatComparableBuildVersion returns the string representation of the given version. // format: `[epoch:]major.minor.patch[.other][@build][-edition]` -func (pv *ComparableVersion) formatComparableVersion() string { +func (pv *ComparableBuildVersion) formatComparableBuildVersion() string { if pv == nil { return "0.0.0" } epochStr := "" if pv.epoch > 0 { - epochStr = strconv.FormatUint(uint64(pv.epoch), 10) + string(comparableVersionSepEpoch) + epochStr = strconv.FormatUint(uint64(pv.epoch), 10) + string(comparableBuildVersionSepEpoch) } semverStr := strconv.FormatUint(uint64(pv.major), 10) + - string(comparableVersionSep) + + string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.minor), 10) + - string(comparableVersionSep) + + string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.patch), 10) otherStr := "" if pv.other > 0 { - otherStr = string(comparableVersionSep) + + otherStr = string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.other), 10) } buildStr := "" if pv.build > 0 { - buildStr = string(comparableVersionSepBuild) + strconv.FormatUint(uint64(pv.build), 10) + buildStr = string(comparableBuildVersionSepBuild) + strconv.FormatUint(uint64(pv.build), 10) } editionStr := "" if ed := pv.edition.String(); ed != "" { - editionStr = string(comparableVersionSepEdition) + ed + editionStr = string(comparableBuildVersionSepEdition) + ed } return epochStr + semverStr + otherStr + buildStr + editionStr } -func parseComparableVersion(version string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { - epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr, err := extractComparableVersionComponents(version) +func parseComparableBuildVersion(version string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { + epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr, err := extractComparableBuildVersionComponents(version) if err != nil { return 0, 0, 0, 0, 0, 0, "", err } - return parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr) + return parseComparableBuildVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr) } -func parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { +func 
parseComparableBuildVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { if epochStr != "" { tmp, err := strconv.ParseUint(epochStr, 10, 8) if err != nil { @@ -282,8 +282,8 @@ func parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, ot return epoch, major, minor, patch, other, build, edition, nil } -// extractComparableVersionComponents takes a version string and returns each component as a string -func extractComparableVersionComponents(version string) (epoch, major, minor, patch, other, build, edition string, err error) { +// extractComparableBuildVersionComponents takes a version string and returns each component as a string +func extractComparableBuildVersionComponents(version string) (epoch, major, minor, patch, other, build, edition string, err error) { var remainder string @@ -291,18 +291,18 @@ func extractComparableVersionComponents(version string) (epoch, major, minor, pa // and still iterating over the entire string only once, albeit in small chunks. // prefixes - epoch, remainder = safeCutBefore(version, string(comparableVersionSepEpoch)) + epoch, remainder = safeCutBefore(version, string(comparableBuildVersionSepEpoch)) // suffixes - edition, remainder = safeCutAfter(remainder, string(comparableVersionSepEdition)) - build, remainder = safeCutAfter(remainder, string(comparableVersionSepBuild)) + edition, remainder = safeCutAfter(remainder, string(comparableBuildVersionSepEdition)) + build, remainder = safeCutAfter(remainder, string(comparableBuildVersionSepBuild)) // major.minor.patch[.other] - major, remainder = safeCutBefore(remainder, string(comparableVersionSep)) - minor, remainder = safeCutBefore(remainder, string(comparableVersionSep)) + major, remainder = safeCutBefore(remainder, string(comparableBuildVersionSep)) + minor, remainder = safeCutBefore(remainder, string(comparableBuildVersionSep)) // handle optional [.other] - if before, after, ok := strings.Cut(remainder, string(comparableVersionSep)); !ok { + if before, after, ok := strings.Cut(remainder, string(comparableBuildVersionSep)); !ok { patch = remainder } else { patch = before diff --git a/base/version_comparable_test.go b/base/version_comparable_build_test.go similarity index 79% rename from base/version_comparable_test.go rename to base/version_comparable_build_test.go index 55f8515625..80ebf6351d 100644 --- a/base/version_comparable_test.go +++ b/base/version_comparable_build_test.go @@ -15,9 +15,9 @@ import ( "github.com/stretchr/testify/require" ) -func TestComparableVersion(t *testing.T) { +func TestComparableBuildVersion(t *testing.T) { // An *ascending* list of valid versions (order is required for comparison testing) - testDataComparableVersions := []struct { + testDataComparableBuildVersions := []struct { str string }{ {"0.0.0"}, // min @@ -48,9 +48,9 @@ func TestComparableVersion(t *testing.T) { {"255:255.255.255.255@65535-EE"}, // max } - for i, test := range testDataComparableVersions { + for i, test := range testDataComparableBuildVersions { t.Run(test.str, func(t *testing.T) { - current, err := NewComparableVersionFromString(test.str) + current, err := NewComparableBuildVersionFromString(test.str) require.NoError(t, err) // string->version->string round-trip @@ -58,8 +58,8 @@ func TestComparableVersion(t *testing.T) { // comparisons (Less/Equal) if i > 1 { - prevStr := testDataComparableVersions[i-1].str - previous, err := 
NewComparableVersionFromString(prevStr) + prevStr := testDataComparableBuildVersions[i-1].str + previous, err := NewComparableBuildVersionFromString(prevStr) require.NoError(t, err) assert.Truef(t, previous.Less(current), "incorrect comparison: expected %q < %q", prevStr, test.str) @@ -70,8 +70,8 @@ func TestComparableVersion(t *testing.T) { } } -func TestInvalidComparableVersion(t *testing.T) { - // A list of invalid ComparableVersion +func TestInvalidComparableBuildVersion(t *testing.T) { + // A list of invalid ComparableBuildVersion tests := []struct { ver string }{ @@ -108,29 +108,29 @@ func TestInvalidComparableVersion(t *testing.T) { for _, test := range tests { t.Run(test.ver, func(t *testing.T) { - ver, err := NewComparableVersionFromString(test.ver) + ver, err := NewComparableBuildVersionFromString(test.ver) assert.Error(t, err) assert.Nil(t, ver) }) } } -func TestComparableVersionJSONRoundTrip(t *testing.T) { +func TestComparableBuildVersionJSONRoundTrip(t *testing.T) { json, err := JSONMarshal(ProductVersion) require.NoError(t, err) - var version ComparableVersion + var version ComparableBuildVersion err = JSONUnmarshal(json, &version) require.NoError(t, err) require.True(t, ProductVersion.Equal(&version)) require.Equal(t, ProductVersion.String(), version.String()) } -func TestComparableVersionEmptyStringJSON(t *testing.T) { - var version ComparableVersion +func TestComparableBuildVersionEmptyStringJSON(t *testing.T) { + var version ComparableBuildVersion err := JSONUnmarshal([]byte(`""`), &version) require.NoError(t, err) - require.True(t, zeroComparableVersion().Equal(&version)) - require.Equal(t, "0.0.0", zeroComparableVersion().String()) + require.True(t, zeroComparableBuildVersion().Equal(&version)) + require.Equal(t, "0.0.0", zeroComparableBuildVersion().String()) require.Equal(t, "0.0.0", version.String()) } @@ -224,30 +224,30 @@ func TestAtLeastMinorDowngradeVersion(t *testing.T) { for _, test := range testCases { t.Run(fmt.Sprintf("%s->%s", test.versionA, test.versionB), func(t *testing.T) { - versionA, err := NewComparableVersionFromString(test.versionA) + versionA, err := NewComparableBuildVersionFromString(test.versionA) require.NoError(t, err) - versionB, err := NewComparableVersionFromString(test.versionB) + versionB, err := NewComparableBuildVersionFromString(test.versionB) require.NoError(t, err) require.Equal(t, test.minorDowngrade, versionA.AtLeastMinorDowngrade(versionB)) }) } } -func BenchmarkComparableVersion(b *testing.B) { +func BenchmarkComparableBuildVersion(b *testing.B) { const str = "8:7.6.5.4@3-EE" - current, err := NewComparableVersionFromString(str) + current, err := NewComparableBuildVersionFromString(str) require.NoError(b, err) - b.Run("parseComparableVersion", func(b *testing.B) { + b.Run("parseComparableBuildVersion", func(b *testing.B) { for i := 0; i < b.N; i++ { - _, _, _, _, _, _, _, _ = parseComparableVersion(str) + _, _, _, _, _, _, _, _ = parseComparableBuildVersion(str) } }) - b.Run("formatComparableVersion", func(b *testing.B) { + b.Run("formatComparableBuildVersion", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = current.formatComparableVersion() + _ = current.formatComparableBuildVersion() } }) } diff --git a/db/active_replicator_pull.go b/db/active_replicator_pull.go index a6b9c7e105..c5f94c140c 100644 --- a/db/active_replicator_pull.go +++ b/db/active_replicator_pull.go @@ -40,10 +40,6 @@ func (apr *ActivePullReplicator) Start(ctx context.Context) error { apr.lock.Lock() defer apr.lock.Unlock() - if apr == nil { - return 
fmt.Errorf("nil ActivePullReplicator, can't start") - } - if apr.ctx != nil && apr.ctx.Err() == nil { return fmt.Errorf("ActivePullReplicator already running") } @@ -158,10 +154,6 @@ func (apr *ActivePullReplicator) _subChanges(collectionIdx *int, since string) e func (apr *ActivePullReplicator) Complete() { base.TracefCtx(apr.ctx, base.KeyReplicate, "ActivePullReplicator.Complete()") apr.lock.Lock() - if apr == nil { - apr.lock.Unlock() - return - } _ = apr.forEachCollection(func(c *activeReplicatorCollection) error { base.TracefCtx(apr.ctx, base.KeyReplicate, "Before calling waitForExpectedSequences in Complete()") if err := c.Checkpointer.waitForExpectedSequences(); err != nil { diff --git a/db/active_replicator_push.go b/db/active_replicator_push.go index 7dac4886d4..a6a21f1431 100644 --- a/db/active_replicator_push.go +++ b/db/active_replicator_push.go @@ -44,10 +44,6 @@ func (apr *ActivePushReplicator) Start(ctx context.Context) error { apr.lock.Lock() defer apr.lock.Unlock() - if apr == nil { - return fmt.Errorf("nil ActivePushReplicator, can't start") - } - if apr.ctx != nil && apr.ctx.Err() == nil { return fmt.Errorf("ActivePushReplicator already running") } @@ -111,10 +107,6 @@ func (apr *ActivePushReplicator) _connect() error { func (apr *ActivePushReplicator) Complete() { base.TracefCtx(apr.ctx, base.KeyReplicate, "ActivePushReplicator.Complete()") apr.lock.Lock() - if apr == nil { - apr.lock.Unlock() - return - } // Wait for any pending changes responses to arrive and be processed err := apr._waitForPendingChangesResponse() diff --git a/db/attachment_test.go b/db/attachment_test.go index 60042bd63e..a29ccfa7f4 100644 --- a/db/attachment_test.go +++ b/db/attachment_test.go @@ -15,7 +15,6 @@ import ( "errors" "fmt" "log" - "math/rand" "net/http" "strconv" "strings" @@ -1509,12 +1508,9 @@ func TestLargeAttachments(t *testing.T) { defer db.Close(ctx) collection := GetSingleDatabaseCollectionWithUser(t, db) - normalAttachment := make([]byte, 15*1024*1024) // permissible size - oversizeAttachment := make([]byte, 25*1024*1024) // memcached would send an E2BIG - hugeAttachment := make([]byte, 35*1024*1024) // memcached would abruptly close our connection - _, _ = rand.Read(normalAttachment) - _, _ = rand.Read(oversizeAttachment) - _, _ = rand.Read(hugeAttachment) + normalAttachment := base.FastRandBytes(t, 15*1024*1024) // permissible size + oversizeAttachment := base.FastRandBytes(t, 25*1024*1024) // memcached would send an E2BIG + hugeAttachment := base.FastRandBytes(t, 35*1024*1024) // memcached would abruptly close our connection _, _, err := collection.Put(ctx, "testdoc", Body{ "_attachments": AttachmentsMeta{ diff --git a/db/crud.go b/db/crud.go index 22db164449..02f24149bf 100644 --- a/db/crud.go +++ b/db/crud.go @@ -414,9 +414,6 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR // db.DbStats.StatsDeltaSync().Add(base.StatKeyDeltaCacheHits, 1) db.dbStats().DeltaSync().DeltaCacheHit.Add(1) return fromRevision.Delta, nil, nil - } else { - // TODO: Recurse and merge deltas when gen(revCacheDelta.toRevID) < gen(toRevId) - // until then, fall through to generating delta for given rev pair } } diff --git a/db/crud_test.go b/db/crud_test.go index c3c9fcb10d..ea8f99f355 100644 --- a/db/crud_test.go +++ b/db/crud_test.go @@ -1360,6 +1360,7 @@ func TestGet1xRevFromDoc(t *testing.T) { // Get the document body bytes with the tombstone revision rev3, with listRevisions=true // Also validate that the BodyRevisions property is present and correct. 
bodyBytes, removed, err = collection.get1xRevFromDoc(ctx, doc, rev3, true) + require.NoError(t, err) assert.NotEmpty(t, bodyBytes, "Document body bytes should be returned") assert.False(t, removed, "This shouldn't be a removed document") assert.NoError(t, response.Unmarshal(bodyBytes)) diff --git a/db/database_test.go b/db/database_test.go index dd51c4407f..29b85e898a 100644 --- a/db/database_test.go +++ b/db/database_test.go @@ -3130,8 +3130,8 @@ func TestGetDatabaseCollectionWithUserDefaultCollection(t *testing.T) { require.NoError(t, err) db, err := GetDatabase(dbCtx, nil) - defer db.Close(ctx) require.NoError(t, err) + defer db.Close(ctx) col, err := db.GetDatabaseCollectionWithUser(testCase.scope, testCase.collection) if testCase.err { require.Error(t, err) diff --git a/db/sg_replicate_cfg.go b/db/sg_replicate_cfg.go index f5932cd2f1..b05fec92a6 100644 --- a/db/sg_replicate_cfg.go +++ b/db/sg_replicate_cfg.go @@ -803,10 +803,9 @@ func (m *sgReplicateManager) RefreshReplicationCfg(ctx context.Context) error { // Check for replications newly assigned to this node for replicationID, replicationCfg := range configReplications { if replicationCfg.AssignedNode == m.localNodeUUID { - replicator, exists := m.activeReplicators[replicationID] + _, exists := m.activeReplicators[replicationID] if !exists { - var initError error - replicator, initError = m.InitializeReplication(replicationCfg) + replicator, initError := m.InitializeReplication(replicationCfg) if initError != nil { base.WarnfCtx(m.loggingCtx, "Error initializing replication %s: %v", initError) continue diff --git a/db/sg_replicate_cfg_test.go b/db/sg_replicate_cfg_test.go index 87ae458cc0..7310b92a65 100644 --- a/db/sg_replicate_cfg_test.go +++ b/db/sg_replicate_cfg_test.go @@ -43,7 +43,7 @@ func TestReplicateManagerReplications(t *testing.T) { assert.Equal(t, replication1_id, r.ID) // Request non-existent replication - r, err = manager.GetReplication("dne") + _, err = manager.GetReplication("dne") require.Error(t, err, base.ErrNotFound) // Attempt to add existing replication diff --git a/docs/api/paths/admin/db-.yaml b/docs/api/paths/admin/db-.yaml index d74cada125..8971c23b7b 100644 --- a/docs/api/paths/admin/db-.yaml +++ b/docs/api/paths/admin/db-.yaml @@ -66,30 +66,6 @@ get: description: Unique server identifier. type: string example: 995618a6a6cc9ac79731bd13240e19b5 - scopes: - description: 'Scopes that are used by the database.' - type: object - example: - scope1: - collections: - collection1: - update_seq: 123456 - collection2: - update_seq: 654321 - additionalProperties: - description: 'The name of the scope.' - type: object - properties: - collections: - description: 'The set of collections within the scope.' - additionalProperties: - description: 'The name of the collection.' - type: object - properties: - update_seq: - description: 'The last sequence number that was committed to the collection.' 
- type: integer - example: 123456 '404': $ref: ../../components/responses.yaml#/Not-found tags: diff --git a/go.mod b/go.mod index eb86172121..446e816588 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/couchbase/gocbcore/v10 v10.2.8 github.com/couchbase/gomemcached v0.2.1 github.com/couchbase/goutils v0.1.2 - github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148 + github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483 github.com/couchbaselabs/go-fleecedelta v0.0.0-20220909152808-6d09efa7a338 github.com/couchbaselabs/gocbconnstr v1.0.5 - github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6 + github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e github.com/davecgh/go-spew v1.1.1 github.com/elastic/gosigar v0.14.2 github.com/felixge/fgprof v0.9.3 diff --git a/go.sum b/go.sum index 89e1629fa0..863f88d8fc 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,11 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/aws/aws-sdk-go v1.44.299 h1:HVD9lU4CAFHGxleMJp95FV/sRhtg7P4miHD1v88JAQk= @@ -38,8 +41,10 @@ github.com/couchbase/gomemcached v0.2.1 h1:lDONROGbklo8pOt4Sr4eV436PVEaKDr3o9gUl github.com/couchbase/gomemcached v0.2.1/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9BCs= github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE= -github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148 h1:9E3u0yA+be219iLLOjuYgagOfM7UqtZ0YIhMXysJVKs= -github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= +github.com/couchbase/sg-bucket v0.0.0-20231108134134-545ec7bf1a9e h1:IFv4HcdpvKFEaaszv6f1WcEbWmU276rFzOaJgarw5gw= +github.com/couchbase/sg-bucket v0.0.0-20231108134134-545ec7bf1a9e/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= +github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483 h1:K6y82On0A3coA+GwW+HGKIwpCpca6ZSvTAJwwTmzCrg= +github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= github.com/couchbase/tools-common/cloud v1.0.0 h1:SQZIccXoedbrThehc/r9BJbpi/JhwJ8X00PDjZ2gEBE= github.com/couchbase/tools-common/cloud v1.0.0/go.mod h1:6KVlRpbcnDWrvickUJ+xpqCWx1vgYYlEli/zL4xmZAg= github.com/couchbase/tools-common/fs v1.0.0 h1:HFA4xCF/r3BtZShFJUxzVvGuXtDkqGnaPzYJP3Kp1mw= @@ -57,8 +62,10 @@ github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2T github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod 
h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY= github.com/couchbaselabs/gocbconnstr v1.0.5 h1:e0JokB5qbcz7rfnxEhNRTKz8q1svoRvDoZihsiwNigA= github.com/couchbaselabs/gocbconnstr v1.0.5/go.mod h1:KV3fnIKMi8/AzX0O9zOrO9rofEqrRF1d2rG7qqjxC7o= -github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6 h1:TeqaJ0zV0omrnvQfw4DF6o+UQQbFdBNPJVod1Y7ovQo= -github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6/go.mod h1:+HMmQTjaINo51eSZFeCKreXYSIu6jbIp+EV9keoKl3E= +github.com/couchbaselabs/rosmar v0.0.0-20231108144220-c0c6c76bb267 h1:dIYPzphKBskYB0viAtWHX/nHOimFuxyVwK9cFA103eA= +github.com/couchbaselabs/rosmar v0.0.0-20231108144220-c0c6c76bb267/go.mod h1:AY2mDCIVElNv3rdOAyFeb7g8phFbv821FuMxX4S6MzI= +github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e h1:6DyLYnzHE4dMfuyz0UEWiBOB/PfUXrxRUy1A4478k6A= +github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e/go.mod h1:+AjMZkAOGCeQRLjIBwehXKyWsNCPFrMKYz6lIaZ1idc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -98,6 +105,7 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= @@ -126,6 +134,7 @@ github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -255,6 +264,7 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/readline.v1 v1.0.0-20160726135117-62c6fe619375/go.mod h1:lNEQeAhU009zbRxng+XOj5ITVgY24WcbNnQopyfKoYQ= @@ -266,6 +276,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go index 1483f3b608..6630a16840 100644 --- a/main.go +++ b/main.go @@ -9,16 +9,9 @@ package main import ( - "math/rand" - "time" - "github.com/couchbase/sync_gateway/rest" ) -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - // Simple Sync Gateway launcher tool. func main() { rest.ServerMain() diff --git a/manifest/4.0.xml b/manifest/4.0.xml new file mode 100644 index 0000000000..78ecd3d33f --- /dev/null +++ b/manifest/4.0.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/manifest/product-config.json b/manifest/product-config.json index 5314fb9eae..08dc2f1819 100644 --- a/manifest/product-config.json +++ b/manifest/product-config.json @@ -6,7 +6,7 @@ "release_name": "Couchbase Sync Gateway", "production": true, "interval": 30, - "go_version": "1.21.3", + "go_version": "1.21.4", "trigger_blackduck": true, "start_build": 1 }, @@ -487,7 +487,15 @@ "trigger_blackduck": true, "start_build": 586 }, - + "manifest/4.0.xml": { + "release": "4.0.0", + "release_name": "Couchbase Sync Gateway 4.0.0", + "production": true, + "interval": 30, + "go_version": "1.21.4", + "trigger_blackduck": true, + "start_build": 1 + }, "manifest/dev.xml": { "release": "dev", "release_name": "Couchbase Sync Gateway Dev", diff --git a/rest/adminapitest/admin_api_test.go b/rest/adminapitest/admin_api_test.go index 73a9a2d02e..a383ed3b8d 100644 --- a/rest/adminapitest/admin_api_test.go +++ b/rest/adminapitest/admin_api_test.go @@ -1472,7 +1472,7 @@ func TestCorruptDbConfigHandling(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyConfig) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to every 1 seconds @@ -1557,7 +1557,7 @@ func TestBadConfigInsertionToBucket(t *testing.T) { base.TestsRequireBootstrapConnection(t) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to every 1 seconds @@ -1608,11 +1608,11 @@ func TestMismatchedBucketNameOnDbConfigUpdate(t *testing.T) { base.TestsRequireBootstrapConnection(t) base.RequireNumTestBuckets(t, 2) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to 
every 1 seconds @@ -1643,11 +1643,11 @@ func TestMultipleBucketWithBadDbConfigScenario1(t *testing.T) { base.TestsRequireBootstrapConnection(t) base.RequireNumTestBuckets(t, 3) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) - tb3 := base.GetPersistentTestBucket(t) + tb3 := base.GetTestBucket(t) defer tb3.Close(ctx) const groupID = "60ce5544-c368-4b08-b0ed-4ca3b37973f9" @@ -1722,9 +1722,9 @@ func TestMultipleBucketWithBadDbConfigScenario2(t *testing.T) { base.RequireNumTestBuckets(t, 3) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) rt1 := rest.NewRestTester(t, &rest.RestTesterConfig{ @@ -1792,9 +1792,9 @@ func TestMultipleBucketWithBadDbConfigScenario3(t *testing.T) { base.TestsRequireBootstrapConnection(t) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ diff --git a/rest/adminapitest/collections_admin_api_test.go b/rest/adminapitest/collections_admin_api_test.go index 30eca1819d..92635b978f 100644 --- a/rest/adminapitest/collections_admin_api_test.go +++ b/rest/adminapitest/collections_admin_api_test.go @@ -179,7 +179,6 @@ func TestRequireResync(t *testing.T) { base.RequireNumTestDataStores(t, 2) base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), PersistentConfig: true, } diff --git a/rest/api.go b/rest/api.go index 355970687d..90b824a5f4 100644 --- a/rest/api.go +++ b/rest/api.go @@ -258,7 +258,7 @@ func (h *handler) handleFlush() error { name := h.db.Name config := h.server.GetDatabaseConfig(name) h.server.RemoveDatabase(h.ctx(), name) - err := bucket.CloseAndDelete() + err := bucket.CloseAndDelete(h.ctx()) _, err2 := h.server.AddDatabaseFromConfig(h.ctx(), config.DatabaseConfig) if err == nil { err = err2 @@ -394,17 +394,16 @@ func (h *handler) instanceStartTimeMicro() int64 { } type DatabaseRoot struct { - DBName string `json:"db_name"` - SequenceNumber *uint64 `json:"update_seq,omitempty"` // The last sequence written to the _default collection, if not running with multiple collections. - CommittedUpdateSequenceNumber *uint64 `json:"committed_update_seq,omitempty"` // Same as above - Used by perf tests, shouldn't be removed - InstanceStartTimeMicro int64 `json:"instance_start_time"` // microseconds since epoch - CompactRunning bool `json:"compact_running"` - PurgeSequenceNumber uint64 `json:"purge_seq"` - DiskFormatVersion uint64 `json:"disk_format_version"` - State string `json:"state"` - ServerUUID string `json:"server_uuid,omitempty"` - RequireResync []string `json:"require_resync,omitempty"` - Scopes map[string]databaseRootScope `json:"scopes,omitempty"` // stats for each scope/collection + DBName string `json:"db_name"` + SequenceNumber *uint64 `json:"update_seq,omitempty"` // The last sequence written to the _default collection, if not running with multiple collections. 
+ CommittedUpdateSequenceNumber *uint64 `json:"committed_update_seq,omitempty"` // Same as above - Used by perf tests, shouldn't be removed + InstanceStartTimeMicro int64 `json:"instance_start_time"` // microseconds since epoch + CompactRunning bool `json:"compact_running"` + PurgeSequenceNumber uint64 `json:"purge_seq"` + DiskFormatVersion uint64 `json:"disk_format_version"` + State string `json:"state"` + ServerUUID string `json:"server_uuid,omitempty"` + RequireResync []string `json:"require_resync,omitempty"` } type dbSummary struct { @@ -413,33 +412,22 @@ type dbSummary struct { State string `json:"state"` } -type databaseRootScope struct { - Collections map[string]databaseRootCollection `json:"collections,omitempty"` -} - -type databaseRootCollection struct { - SequenceNumber uint64 `json:"update_seq"` // The last sequence written for this collection -} - func (h *handler) handleGetDB() error { if h.rq.Method == "HEAD" { return nil } - // TODO: If running with multiple collections, leave nil - var defaultCollectionLastSeq *uint64 - // Don't bother trying to lookup LastSequence() if offline + var lastSeq uint64 runState := db.RunStateString[atomic.LoadUint32(&h.db.State)] if runState != db.RunStateString[db.DBOffline] { - lastSeq, _ := h.db.LastSequence(h.ctx()) - defaultCollectionLastSeq = &lastSeq + lastSeq, _ = h.db.LastSequence(h.ctx()) } var response = DatabaseRoot{ DBName: h.db.Name, - SequenceNumber: defaultCollectionLastSeq, - CommittedUpdateSequenceNumber: defaultCollectionLastSeq, + SequenceNumber: &lastSeq, + CommittedUpdateSequenceNumber: &lastSeq, InstanceStartTimeMicro: h.instanceStartTimeMicro(), CompactRunning: h.db.IsCompactRunning(), PurgeSequenceNumber: 0, // TODO: Should track this value @@ -447,16 +435,6 @@ func (h *handler) handleGetDB() error { State: runState, ServerUUID: h.db.DatabaseContext.ServerUUID, RequireResync: h.db.RequireResync.ScopeAndCollectionNames(), - - // TODO: If running with multiple scope/collections - // Scopes: map[string]databaseRootScope{ - // "scope1": { - // Collections: map[string]databaseRootCollection{ - // "collection1": {SequenceNumber: 123456}, - // "collection2": {SequenceNumber: 987654}, - // }, - // }, - // }, } h.writeJSON(response) diff --git a/rest/api_collections_test.go b/rest/api_collections_test.go index 6472f81dfb..9a7b532c68 100644 --- a/rest/api_collections_test.go +++ b/rest/api_collections_test.go @@ -267,7 +267,7 @@ func TestMultiCollectionChannelAccess(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) ctx := base.TestCtx(t) - tb := base.GetPersistentTestBucket(t) + tb := base.GetTestBucket(t) defer tb.Close(ctx) scopesConfig := GetCollectionsConfig(t, tb, 2) @@ -281,9 +281,8 @@ func TestMultiCollectionChannelAccess(t *testing.T) { scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - fmt.Println(scopesConfig) rtConfig := &RestTesterConfig{ - CustomTestBucket: tb.NoCloseClone(), + CustomTestBucket: tb, DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ Scopes: scopesConfig, NumIndexReplicas: base.UintPtr(0), @@ -337,16 +336,21 @@ func TestMultiCollectionChannelAccess(t *testing.T) { RequireStatus(t, resp, http.StatusOK) // Add a new collection and update the db config - scopesConfig = GetCollectionsConfig(t, tb, 3) - dataStoreNames = GetDataStoreNamesFromScopesConfig(scopesConfig) + scopesConfig3Collections := GetCollectionsConfig(t, tb, 3) + dataStoreNames = 
GetDataStoreNamesFromScopesConfig(scopesConfig3Collections) collection3 := dataStoreNames[2].CollectionName() - scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection3] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfigString, err := json.Marshal(scopesConfig) + scopesConfig3Collections[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfig3Collections[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfig3Collections[scope].Collections[collection3] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfigString, err := json.Marshal(scopesConfig3Collections) require.NoError(t, err) + scopesConfig2Collections := GetCollectionsConfig(t, tb, 2) + + scopesConfig2Collections[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfig2Collections[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} + resp = rt.SendAdminRequest("PUT", "/db/_config", fmt.Sprintf( `{"bucket": "%s", "num_index_replicas": 0, "enable_shared_bucket_access": %t, "scopes":%s}`, tb.GetName(), base.TestUseXattrs(), string(scopesConfigString))) @@ -378,11 +382,7 @@ func TestMultiCollectionChannelAccess(t *testing.T) { RequireStatus(t, resp, http.StatusOK) // Remove collection and update the db config - scopesConfig = GetCollectionsConfig(t, tb, 2) - - scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfigString, err = json.Marshal(scopesConfig) + scopesConfigString, err = json.Marshal(scopesConfig2Collections) require.NoError(t, err) resp = rt.SendAdminRequest("PUT", "/db/_config", fmt.Sprintf( diff --git a/rest/api_test.go b/rest/api_test.go index ea25a09857..377e8b06d8 100644 --- a/rest/api_test.go +++ b/rest/api_test.go @@ -1700,8 +1700,8 @@ func TestLongpollWithWildcard(t *testing.T) { // has a wait counter of zero (no documents writted since the listener was restarted). 
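The TestLongpollWithWildcard hunk continuing below moves wg.Add(1) out of the spawned goroutine and onto the calling goroutine. A minimal, standard-library-only sketch of why that ordering matters (illustrative, not repo code):

    package main

    import "sync"

    func main() {
        var wg sync.WaitGroup

        // Register the worker before the goroutine is scheduled. If Add(1) ran
        // inside the goroutine instead, Wait() below could observe a zero
        // counter and return before the worker had even started.
        wg.Add(1)
        go func() {
            defer wg.Done()
            // ... issue the longpoll _changes request here ...
        }()

        // ... write documents / terminate the changes feed ...

        wg.Wait() // guaranteed to wait for the in-flight worker
    }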
wg := sync.WaitGroup{} // start changes request + wg.Add(1) go func() { - wg.Add(1) defer wg.Done() changesJSON := `{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"0"}` changesResponse := rt.SendUserRequest("POST", "/{{.keyspace}}/_changes", changesJSON, "bernard") diff --git a/rest/attachment_test.go b/rest/attachment_test.go index 0b480d4127..f44408068d 100644 --- a/rest/attachment_test.go +++ b/rest/attachment_test.go @@ -2260,184 +2260,205 @@ func TestAttachmentDeleteOnExpiry(t *testing.T) { } func TestUpdateExistingAttachment(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + doc1Version := rt.PutDoc(doc1ID, `{}`) + doc2Version := rt.PutDoc(doc2ID, `{}`) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + require.NoError(t, rt.WaitForPendingChanges()) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, 
rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) - doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - assert.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) + + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) + require.NoError(t, err) - assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, "attachmentB", string(req.BodyBytes())) + doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + assert.NoError(t, err) + + assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + + req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, "attachmentB", string(req.BodyBytes())) + }) } // TestPushUnknownAttachmentAsStub sets revpos to an older generation, for an attachment that doesn't exist on the server. // Verifies that getAttachment is triggered, and attachment is properly persisted. func TestPushUnknownAttachmentAsStub(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() - + } const doc1ID = "doc1" - doc1Version := rt.PutDoc(doc1ID, `{}`) + btcRunner := NewBlipTesterClientRunner(t) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Add doc1 and doc2 + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) + require.NoError(t, btc.rt.WaitForPendingChanges()) - // force attachment into test client's store to validate it's fetched - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - contentType := "text/plain" + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - length, digest, err := btc.saveAttachment(contentType, attachmentAData) - require.NoError(t, err) - // Update doc1, include reference to non-existing attachment with recent revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) - require.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) - require.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + // force attachment 
into test client's store to validate it's fetched + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + contentType := "text/plain" - // verify that attachment exists on document and was persisted - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + length, digest, err := btcRunner.saveAttachment(btc.id, contentType, attachmentAData) + require.NoError(t, err) + // Update doc1, include reference to non-existing attachment with recent revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) + require.NoError(t, err) + + require.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + // verify that attachment exists on document and was persisted + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + }) } func TestMinRevPosWorkToAvoidUnnecessaryProveAttachment(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ AllowConflicts: base.BoolPtr(true), }, }, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } - // Push an initial rev with attachment data + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc" - initialVersion := rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) - // Replicate data to client and ensure doc arrives - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, initialVersion) - assert.True(t, found) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Push a revision with a bunch of history simulating doc updated on mobile device - // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment - proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() - revid, err := btc.PushRevWithHistory(docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Push an initial rev with attachment data + initialVersion := btc.rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Push another bunch of history - _, err = btc.PushRevWithHistory(docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter = 
btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + // Replicate data to client and ensure doc arrives + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, initialVersion) + assert.True(t, found) + + // Push a revision with a bunch of history simulating doc updated on mobile device + // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment + proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() + revid, err := btcRunner.PushRevWithHistory(btc.id, docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + + // Push another bunch of history + _, err = btcRunner.PushRevWithHistory(btc.id, docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + }) } + func TestAttachmentWithErroneousRevPos(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) - // Create rev 1 with the hello.txt attachment - const docID = "doc" - version := rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Pull rev and attachment down to client - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, version) - assert.True(t, found) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Create rev 1 with the hello.txt attachment + const docID = "doc" + version := btc.rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Add an attachment to client - btc.AttachmentsLock().Lock() - btc.Attachments()["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") - btc.AttachmentsLock().Unlock() + // Pull rev and attachment down to client + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, found) - // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment - _, err = btc.PushRevWithHistory(docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) - require.NoError(t, err) + // Add an attachment to client + btcRunner.AttachmentsLock(btc.id).Lock() + btcRunner.Attachments(btc.id)["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") + 
btcRunner.AttachmentsLock(btc.id).Unlock() - // Ensure message and attachment is pushed up - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment + _, err = btcRunner.PushRevWithHistory(btc.id, docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) + require.NoError(t, err) - // Get the attachment and ensure the data is updated - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) + // Ensure message and attachment is pushed up + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + // Get the attachment and ensure the data is updated + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) + }) } // CBG-2004: Test that prove attachment over Blip works correctly when receiving a ErrAttachmentNotFound @@ -2578,74 +2599,79 @@ func TestPutInvalidAttachment(t *testing.T) { // validates that proveAttachment isn't being invoked when the attachment is already present and the // digest doesn't change, regardless of revpos. func TestCBLRevposHandling(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) + doc2Version := btc.rt.PutDoc(doc2ID, `{}`) + require.NoError(t, btc.rt.WaitForPendingChanges()) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = 
rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - // Update doc1, don't change attachment, use correct revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) - // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) - require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - // Validate attachment exists - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use correct revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) + require.NoError(t, err) - attachmentPushCount := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment - _, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) - require.NoError(t, err) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - // Validate attachment exists and is updated - attResponse = rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. 
+ doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) + require.NoError(t, err) - attachmentPushCountAfter := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + // Validate attachment exists + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + attachmentPushCount := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment + _, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) + require.NoError(t, err) + + // Validate attachment exists and is updated + attResponse = btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + + attachmentPushCountAfter := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + }) } // Helper_Functions diff --git a/rest/blip_api_attachment_test.go b/rest/blip_api_attachment_test.go index 266e580678..15a16c2c62 100644 --- a/rest/blip_api_attachment_test.go +++ b/rest/blip_api_attachment_test.go @@ -43,56 +43,63 @@ func TestBlipPushPullV2AttachmentV2Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + // given this test is for v2 protocol, skip version vector test + btcRunner.SkipVersionVectorInitialization = true const docID = "doc1" - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + opts := &BlipTesterClientOpts{} + opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - // Update the replicated doc at client along with keeping the same attachment stub. 
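The hunks above and below all converge on the same shape: a BlipTesterClientRunner owns the per-protocol loop, the RestTester and client are created inside the Run closure, and client operations go through the runner keyed by btc.id. A condensed skeleton of that shape, assuming it sits in the same rest test package as these files (the test name is illustrative, not from the repo); v2-only tests additionally set btcRunner.SkipVersionVectorInitialization = true, as the neighbouring hunks show.

    // TestExampleBlipRunner is an illustrative skeleton, not a test in the repo.
    func TestExampleBlipRunner(t *testing.T) {
        rtConfig := &RestTesterConfig{GuestEnabled: true}
        btcRunner := NewBlipTesterClientRunner(t)

        btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
            // RestTester and client are scoped to each protocol run.
            rt := NewRestTester(t, rtConfig)
            defer rt.Close()

            opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols}
            btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts)
            defer btc.Close()

            // Client operations are invoked through the runner, addressed by btc.id.
            err := btcRunner.StartOneshotPull(btc.id)
            assert.NoError(t, err)
        })
    }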
- bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - respBody := rt.GetDocVersion(docID, version) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) + // Update the replicated doc at client along with keeping the same attachment stub. + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } // Test pushing and pulling v2 attachments with v3 client @@ -113,54 +120,59 @@ func TestBlipPushPullV2AttachmentV3Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer 
btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client along with keeping the same attachment stub. - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client along with keeping the same attachment stub. 
+ bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } // TestBlipProveAttachmentV2 ensures that CBL's proveAttachment for deduplication is working correctly even for v2 attachments which aren't de-duped on the server side. @@ -169,56 +181,59 @@ func TestBlipProveAttachmentV2(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) const ( doc1ID = "doc1" doc2ID = "doc2" ) - const ( attachmentName = "hello.txt" attachmentData = "hello world" ) - var ( attachmentDataB64 = base64.StdEncoding.EncodeToString([]byte(attachmentData)) attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, - // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd. 
- doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version := rt.PutDoc(doc1ID, doc1Body) - - data, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) - - // create doc2 now that we know the client has the attachment - doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version := rt.PutDoc(doc2ID, doc2Body) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - data, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) - bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) - assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) - assert.Equal(t, int64(len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, + // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd. 
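The comment just above is the key to the pull stats asserted at the end of this test: the client keeps attachments keyed by digest (the same store exposed as btcRunner.Attachments(btc.id) elsewhere in this patch), so two documents sharing one digest cost a single pull, and the second reference is handled via proveAttachment rather than a re-download. A toy illustration of that bookkeeping; attachmentStore and needsPull are hypothetical names, not the harness implementation:

    package main

    import "fmt"

    // attachmentStore is a digest-keyed body cache, illustrative only.
    type attachmentStore map[string][]byte

    // needsPull reports whether the body for a digest still has to be fetched.
    func (s attachmentStore) needsPull(digest string) bool {
        _, have := s[digest]
        return !have
    }

    func main() {
        store := attachmentStore{}
        digest := "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" // digest shared by doc1 and doc2

        fmt.Println(store.needsPull(digest)) // true  -> first doc triggers the single pull
        store[digest] = []byte("hello world")
        fmt.Println(store.needsPull(digest)) // false -> second doc reuses the cached body
    }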
+ doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version := btc.rt.PutDoc(doc1ID, doc1Body) + + data, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + // create doc2 now that we know the client has the attachment + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version := btc.rt.PutDoc(doc2ID, doc2Body) + + data, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) + bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) + assert.Equal(t, int64(len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) + }) } // TestBlipProveAttachmentV2Push ensures that CBL's attachment deduplication is ignored for push replications - resulting in new server-side digests and duplicated attachment data (v2 attachment format). @@ -227,50 +242,51 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - const ( doc1ID = "doc1" doc2ID = "doc2" ) - const ( attachmentName = "hello.txt" attachmentData = "hello world" ) - var ( attachmentDataB64 = base64.StdEncoding.EncodeToString([]byte(attachmentData)) // attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. - doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version, err := btc.PushRev(doc1ID, EmptyDocVersion(), []byte(doc1Body)) - require.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - err = rt.WaitForVersion(doc1ID, doc1Version) - require.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
- doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version, err := btc.PushRev(doc2ID, EmptyDocVersion(), []byte(doc2Body)) - require.NoError(t, err) - - err = rt.WaitForVersion(doc2ID, doc2Version) - require.NoError(t, err) - - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) - assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(2*len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer btc.Close() + // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. + doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version, err := btcRunner.PushRev(btc.id, doc1ID, EmptyDocVersion(), []byte(doc1Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc1ID, doc1Version) + require.NoError(t, err) + + // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version, err := btcRunner.PushRev(btc.id, doc2ID, EmptyDocVersion(), []byte(doc2Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc2ID, doc2Version) + require.NoError(t, err) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(2*len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { @@ -278,130 +294,139 @@ func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. 
- bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "2-abc", revId) - - // Wait for the documents to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, - // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err = btc.PushRevWithHistory(docID, revId, []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(4) - assert.True(t, ok) - - resp = rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. 
+ bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "2-abc", revId) + + // Wait for the documents to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, + // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err = btcRunner.PushRevWithHistory(btc.id, docID, revId, []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(4) + assert.True(t, ok) + + resp = btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. + require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } func TestBlipPushPullNewAttachmentNoCommonAncestor(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) const docID = "doc1" - - // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. - // rev tree pruning on the CBL side, so 1-abc no longer exists. 
- // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 - bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "2-abc", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(4), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. + // rev tree pruning on the CBL side, so 1-abc no longer exists. 
+ // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "2-abc", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(4), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. + require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } // Test Attachment replication behavior described here: https://github.com/couchbase/couchbase-lite-core/wiki/Replication-Protocol @@ -507,163 +532,181 @@ func TestPutAttachmentViaBlipGetViaBlip(t *testing.T) { // TestBlipAttachNameChange tests CBL handling - attachments with changed names are sent as stubs, and not new attachments func TestBlipAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - attachmentA := []byte("attachmentA") - attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) - digest := db.Sha1DigestKey(attachmentA) - - // Push initial attachment data - version, err := client1.PushRev("doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) - require.NoError(t, err) - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attachmentA) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - version, err = client1.PushRev("doc", version, 
[]byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - err = rt.WaitForVersion("doc", version) - require.NoError(t, err) - - // Check if attachment is still in bucket - bucketAttachmentA, _, err = rt.GetSingleDataStore().GetRaw(attachmentAKey) - assert.NoError(t, err) - assert.Equal(t, bucketAttachmentA, attachmentA) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attachmentA, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + + attachmentA := []byte("attachmentA") + attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) + digest := db.Sha1DigestKey(attachmentA) + + // Push initial attachment data + version, err := btcRunner.PushRev(client1.id, "doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) + require.NoError(t, err) + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attachmentA) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + err = client1.rt.WaitForVersion("doc", version) + require.NoError(t, err) + + // Check if attachment is still in bucket + bucketAttachmentA, _, err = client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + assert.NoError(t, err) + assert.Equal(t, bucketAttachmentA, attachmentA) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attachmentA, resp.BodyBytes()) + }) } // TestBlipLegacyAttachNameChange ensures that CBL name changes for legacy attachments are handled correctly func TestBlipLegacyAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - // Create document in the bucket with a legacy attachment - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - // Get the document and grab the revID. 
- docVersion, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, docVersion.RevID, rawDoc) - - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - docVersion, err = client1.PushRev("doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", docVersion) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + // Get the document and grab the revID. 
+ docVersion, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, docVersion.RevID, rawDoc) + + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + docVersion, err = btcRunner.PushRev(client1.id, "doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", docVersion) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + }) } // TestBlipLegacyAttachDocUpdate ensures that CBL updates for documents associated with legacy attachments are handled correctly func TestBlipLegacyAttachDocUpdate(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - - // Create document in the bucket with a legacy attachment. Properties here align with rawDocWithAttachmentAndSyncMeta - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - attName := "hi.txt" - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. 
- CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - version, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, version.RevID, rawDoc) - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - dataStore := rt.GetSingleDataStore() - bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Update the document, leaving body intact - version, err = client1.PushRev("doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", version) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) - - // Validate that the attachment hasn't been migrated to V2 - v1Key := db.MakeAttachmentKey(1, "doc", digest) - v1Body, _, err := dataStore.GetRaw(v1Key) - require.NoError(t, err) - require.EqualValues(t, attBody, v1Body) - - v2Key := db.MakeAttachmentKey(2, "doc", digest) - _, _, err = dataStore.GetRaw(v2Key) - require.Error(t, err) - // Confirm correct type of error for both integration test and Walrus - if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { - var keyValueErr *gocb.KeyValueError - require.True(t, errors.As(err, &keyValueErr)) - //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) - require.Equal(t, keyValueErr.DocumentID, v2Key) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, } + + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment. Properties here align with rawDocWithAttachmentAndSyncMeta + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + attName := "hi.txt" + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. 
+ CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + version, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, version.RevID, rawDoc) + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + dataStore := client1.rt.GetSingleDataStore() + bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Update the document, leaving body intact + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", version) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + + // Validate that the attachment hasn't been migrated to V2 + v1Key := db.MakeAttachmentKey(1, "doc", digest) + v1Body, _, err := dataStore.GetRaw(v1Key) + require.NoError(t, err) + require.EqualValues(t, attBody, v1Body) + + v2Key := db.MakeAttachmentKey(2, "doc", digest) + _, _, err = dataStore.GetRaw(v2Key) + require.Error(t, err) + // Confirm correct type of error for both integration test and Walrus + if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { + var keyValueErr *gocb.KeyValueError + require.True(t, errors.As(err, &keyValueErr)) + //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) + require.Equal(t, keyValueErr.DocumentID, v2Key) + } + }) } // TestAttachmentComputeStat: @@ -676,31 +719,33 @@ func TestAttachmentComputeStat(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() - // Wait for the document to be replicated to client. 
- data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - // assert the attachment read compute stat is incremented - require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + // Wait for the document to be replicated to client. + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // assert the attachment read compute stat is incremented + require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + }) } diff --git a/rest/blip_api_collections_test.go b/rest/blip_api_collections_test.go index 7839daa4ee..5663e1227e 100644 --- a/rest/blip_api_collections_test.go +++ b/rest/blip_api_collections_test.go @@ -28,322 +28,344 @@ func TestBlipGetCollections(t *testing.T) { // checkpointIDWithError := "checkpointError" const defaultScopeAndCollection = "_default._default" - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{GuestEnabled: true}, 1) - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, - &BlipTesterClientOpts{ + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 1) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ SkipCollectionsInitialization: true, - }, - ) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - testCases := []struct { - name string - requestBody db.GetCollectionsRequestBody - resultBody []db.Body - errorCode string - }{ - { - name: "noDocInDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{defaultScopeAndCollection}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + testCases := []struct { + name string + requestBody db.GetCollectionsRequestBody + resultBody []db.Body + 
errorCode string + }{ + { + name: "noDocInDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "mismatchedLengthOnInput", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id", "id2"}, - Collections: []string{defaultScopeAndCollection}, + { + name: "mismatchedLengthOnInput", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id", "id2"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "inDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{defaultScopeAndCollection}, + { + name: "inDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "badScopeSpecificationEmptyString", - // bad scope specification - empty string - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{""}, + { + name: "badScopeSpecificationEmptyString", + // bad scope specification - empty string + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{""}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "presentNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{scopeAndCollection}, + { + name: "presentNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{checkpoint1Body}, + errorCode: "", }, - resultBody: []db.Body{checkpoint1Body}, - errorCode: "", - }, - { - name: "unseenInNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{scopeAndCollection}, + { + name: "unseenInNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{db.Body{}}, + errorCode: "", }, - resultBody: []db.Body{db.Body{}}, - errorCode: "", - }, - // { - // name: "checkpointExistsWithErrorInNonDefaultCollection", - // requestBody: db.GetCollectionsRequestBody{ - // CheckpointIDs: []string{checkpointIDWithError}, - // Collections: []string{scopeAndCollection}, - // }, - // resultBody: []db.Body{nil}, - // errorCode: "", - // }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the norev handler - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode 
:= resp.Properties[db.BlipErrorCode] - require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) - require.Equal(t, errorCode, testCase.errorCode) - if testCase.errorCode != "" { - return - } - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) + // { + // name: "checkpointExistsWithErrorInNonDefaultCollection", + // requestBody: db.GetCollectionsRequestBody{ + // CheckpointIDs: []string{checkpointIDWithError}, + // Collections: []string{scopeAndCollection}, + // }, + // resultBody: []db.Body{nil}, + // errorCode: "", + // }, + } - require.Equal(t, testCase.resultBody, checkpoints) - }) - } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the norev handler + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) + require.Equal(t, errorCode, testCase.errorCode) + if testCase.errorCode != "" { + return + } + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + + require.Equal(t, testCase.resultBody, checkpoints) + }) + } + }) } func TestBlipReplicationNoDefaultCollection(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + + subChangesRequest := blip.NewRequest() + subChangesRequest.SetProfile(db.MessageSubChanges) + + require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) + resp := subChangesRequest.Response() + require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - - subChangesRequest := blip.NewRequest() - subChangesRequest.SetProfile(db.MessageSubChanges) - - require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) - resp := subChangesRequest.Response() - require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) } func 
TestBlipGetCollectionsAndSetCheckpoint(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, - }) - - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the GetCollections - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.False(t, hasErrorCode) - require.Equal(t, errorCode, "") - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) - require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) - - // make sure other functions get called - - requestGetCheckpoint := blip.NewRequest() - requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) - requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 - requestGetCheckpoint.Properties[db.BlipCollection] = "0" - require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) - resp = requestGetCheckpoint.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] - require.Equal(t, errorCode, "") - require.False(t, hasErrorCode) - var checkpoint db.Body - err = resp.ReadJSONBody(&checkpoint) - require.NoErrorf(t, err, "Actual error %+v", checkpoint) - - require.Equal(t, db.Body{"seq": "123"}, checkpoint) + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, + }) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the GetCollections + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.False(t, hasErrorCode) + require.Equal(t, errorCode, "") + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual 
error %+v", checkpoints) + require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) + + // make sure other functions get called + + requestGetCheckpoint := blip.NewRequest() + requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) + requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 + requestGetCheckpoint.Properties[db.BlipCollection] = "0" + require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) + resp = requestGetCheckpoint.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] + require.Equal(t, errorCode, "") + require.False(t, hasErrorCode) + var checkpoint db.Body + err = resp.ReadJSONBody(&checkpoint) + require.NoErrorf(t, err, "Actual error %+v", checkpoint) + + require.Equal(t, db.Body{"seq": "123"}, checkpoint) + }) } func TestCollectionsReplication(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - const docID = "doc1" - version := rt.PutDoc(docID, "{}") - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + version := btc.rt.PutDoc(docID, "{}") + require.NoError(t, btc.rt.WaitForPendingChanges()) - btcCollection := btc.SingleCollection() + btcCollection := btcRunner.SingleCollection(btc.id) - err = btcCollection.StartOneshotPull() - require.NoError(t, err) + err := btcCollection.StartOneshotPull() + require.NoError(t, err) - _, ok := btcCollection.WaitForVersion(docID, version) - require.True(t, ok) + _, ok := btcCollection.WaitForVersion(docID, version) + require.True(t, ok) + }) } func TestBlipReplicationMultipleCollections(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) - docName := "doc1" - body := `{"foo":"bar"}` - versions := make([]DocVersion, 0, len(rt.GetKeyspaces())) - for _, keyspace := range rt.GetKeyspaces() { - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) - RequireStatus(t, resp, http.StatusCreated) - versions = append(versions, DocVersionFromPutResponse(t, resp)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() - } - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartPull()) - } + docName := "doc1" + body := `{"foo":"bar"}` + versions := make([]DocVersion, 0, len(btc.rt.GetKeyspaces())) + for _, keyspace := range btc.rt.GetKeyspaces() { + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) + RequireStatus(t, 
resp, http.StatusCreated) + versions = append(versions, DocVersionFromPutResponse(t, resp)) + } + require.NoError(t, btc.rt.WaitForPendingChanges()) - for i, collectionClient := range btc.collectionClients { - msg, ok := collectionClient.WaitForVersion(docName, versions[i]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartPull()) + } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for i, collectionClient := range btc.collectionClients { + msg, ok := collectionClient.WaitForVersion(docName, versions[i]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } func TestBlipReplicationMultipleCollectionsMismatchedDocSizes(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - body := `{"foo":"bar"}` - collectionDocIDs := make(map[string][]string) - collectionVersions := make(map[string][]DocVersion) - require.Len(t, rt.GetKeyspaces(), 2) - for i, keyspace := range rt.GetKeyspaces() { - // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one - docCount := 10 - if i == 0 { - docCount = 1 + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + body := `{"foo":"bar"}` + collectionDocIDs := make(map[string][]string) + collectionVersions := make(map[string][]DocVersion) + require.Len(t, btc.rt.GetKeyspaces(), 2) + for i, keyspace := range btc.rt.GetKeyspaces() { + // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one + docCount := 10 + if i == 0 { + docCount = 1 + } + blipName := btc.rt.getCollectionsForBLIP()[i] + for j := 0; j < docCount; j++ { + docName := fmt.Sprintf("doc%d", j) + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) + RequireStatus(t, resp, http.StatusCreated) + + version := DocVersionFromPutResponse(t, resp) + collectionVersions[blipName] = append(collectionVersions[blipName], version) + collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) + } } - blipName := rt.getCollectionsForBLIP()[i] - for j := 0; j < docCount; j++ { - docName := fmt.Sprintf("doc%d", j) - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) - RequireStatus(t, resp, http.StatusCreated) + require.NoError(t, btc.rt.WaitForPendingChanges()) - version := DocVersionFromPutResponse(t, resp) - collectionVersions[blipName] = append(collectionVersions[blipName], version) - collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) + // start all the clients first + for _, collectionClient := range btc.collectionClients { + 
require.NoError(t, collectionClient.StartOneshotPull()) } - } - require.NoError(t, rt.WaitForPendingChanges()) - - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartOneshotPull()) - } - - for _, collectionClient := range btc.collectionClients { - versions := collectionVersions[collectionClient.collection] - docIDs := collectionDocIDs[collectionClient.collection] - msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for _, collectionClient := range btc.collectionClients { + versions := collectionVersions[collectionClient.collection] + docIDs := collectionDocIDs[collectionClient.collection] + msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } diff --git a/rest/blip_api_crud_test.go b/rest/blip_api_crud_test.go index 7c041f7cb5..581abbe797 100644 --- a/rest/blip_api_crud_test.go +++ b/rest/blip_api_crud_test.go @@ -1836,64 +1836,73 @@ func TestBlipPullRevMessageHistory(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client.ClientDeltas = true - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + const docID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + data, ok := btcRunner.WaitForVersion(client.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - data, ok = client.WaitForVersion(docID, version2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - 
assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version + data, ok = btcRunner.WaitForVersion(client.id, docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version + }) } // Reproduces CBG-617 (a client using activeOnly for the initial replication, and then still expecting to get subsequent tombstones afterwards) func TestActiveOnlyContinuous(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + rtConfig := &RestTesterConfig{GuestEnabled: true} - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - const docID = "doc1" - version := rt.PutDoc(docID, `{"test":true}`) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // start an initial pull - require.NoError(t, btc.StartPullSince("true", "0", "true")) - rev, found := btc.WaitForVersion(docID, version) - assert.True(t, found) - assert.Equal(t, `{"test":true}`, string(rev)) + version := rt.PutDoc(docID, `{"test":true}`) - // delete the doc and make sure the client still gets the tombstone replicated - deletedVersion := rt.DeleteDocReturnVersion(docID, version) + // start an initial pull + require.NoError(t, btcRunner.StartPullSince(btc.id, "true", "0", "true")) + rev, found := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, found) + assert.Equal(t, `{"test":true}`, string(rev)) - rev, found = btc.WaitForVersion(docID, deletedVersion) - assert.True(t, found) - assert.Equal(t, `{}`, string(rev)) + // delete the doc and make sure the client still gets the tombstone replicated + deletedVersion := rt.DeleteDocReturnVersion(docID, version) + + rev, found = btcRunner.WaitForVersion(btc.id, docID, deletedVersion) + assert.True(t, found) + assert.Equal(t, `{}`, string(rev)) + }) } // Test that exercises Sync Gateway's norev handler @@ -1901,34 +1910,39 @@ func TestBlipNorev(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - norevMsg := db.NewNoRevMessage() - norevMsg.SetId("docid") - norevMsg.SetRev("1-a") - norevMsg.SetSequence(db.SequenceID{Seq: 50}) - norevMsg.SetError("404") - norevMsg.SetReason("couldn't send xyz") - btc.addCollectionProperty(norevMsg.Message) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + norevMsg := db.NewNoRevMessage() + norevMsg.SetId("docid") + norevMsg.SetRev("1-a") + norevMsg.SetSequence(db.SequenceID{Seq: 
50}) + norevMsg.SetError("404") + norevMsg.SetReason("couldn't send xyz") + btc.addCollectionProperty(norevMsg.Message) - // Couchbase Lite always sends noreply=true for norev messages - // but set to false so we can block waiting for a reply - norevMsg.SetNoReply(false) + // Couchbase Lite always sends noreply=true for norev messages + // but set to false so we can block waiting for a reply + norevMsg.SetNoReply(false) - // Request that the handler used to process the message is sent back in the response - norevMsg.Properties[db.SGShowHandler] = "true" + // Request that the handler used to process the message is sent back in the response + norevMsg.Properties[db.SGShowHandler] = "true" - assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) + assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) - // Check that the response we got back was processed by the norev handler - resp := norevMsg.Response() - assert.NotNil(t, resp) - assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) + // Check that the response we got back was processed by the norev handler + resp := norevMsg.Response() + assert.NotNil(t, resp) + assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) + }) } // TestNoRevSetSeq makes sure the correct string is used with the corresponding function @@ -1949,99 +1963,103 @@ func TestRemovedMessageWithAlternateAccess(t *testing.T) { defer db.SuspendSequenceBatching()() base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) - changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) + const docID = "doc" + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) 
+ assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docID, version) - assert.True(t, ok) + version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - version = rt.UpdateDoc(docID, version, `{"channels": []}`) - const docMarker = "docmarker" - docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) - assert.False(t, changes.Results[0].Revoked) - assert.Equal(t, "docmarker", changes.Results[1].ID) - RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) - assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) - assert.False(t, changes.Results[1].Revoked) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docMarker, docMarkerVersion) - assert.True(t, ok) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - messages := btc.pullReplication.GetMessages() + version = rt.UpdateDoc(docID, version, `{"channels": []}`) + const docMarker = "docmarker" + docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() + changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + assert.Len(t, changes.Results, 2) + assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) + assert.False(t, changes.Results[0].Revoked) + assert.Equal(t, "docmarker", changes.Results[1].ID) + RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) + assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) + 
assert.False(t, changes.Results[1].Revoked) + + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docMarker, docMarkerVersion) + assert.True(t, ok) + + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 3) - require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A - require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B - require.Len(t, messageBody[2], 3) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 3) + require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A + require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B + require.Len(t, messageBody[2], 3) - deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - id := messageBody[0].([]interface{})[1] - require.NoError(t, err) - assert.Equal(t, "doc", id) - assert.Equal(t, int64(4), deletedFlags) + deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + id := messageBody[0].([]interface{})[1] + require.NoError(t, err) + assert.Equal(t, "doc", id) + assert.Equal(t, int64(4), deletedFlags) + }) } // TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication tests the following scenario: @@ -2057,91 +2075,95 @@ func TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication(t *testi defer db.SuspendSequenceBatching()() base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) - const ( - docID = "doc" - ) - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - changes, err := rt.WaitForChanges(1, 
"/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + const ( + docID = "doc" + ) + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - version = rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) - require.NoError(t, rt.WaitForPendingChanges()) - // At this point changes should send revocation, as document isn't in any of the user's channels - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docID, version) - assert.True(t, ok) + version = rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) + require.NoError(t, rt.WaitForPendingChanges()) + // At this point changes should send revocation, as document isn't in any of the user's channels + changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - markerID := "docmarker" - markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) - require.NoError(t, rt.WaitForPendingChanges()) + err = btcRunner.StartOneshotPullFiltered(btc.id, "A") + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API - assert.Equal(t, "doc", changes.Results[0].ID) - assert.Equal(t, markerID, changes.Results[1].ID) + _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + markerID := "docmarker" + markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) + require.NoError(t, rt.WaitForPendingChanges()) - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(markerID, markerVersion) - assert.True(t, ok) + // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received + changes, err = rt.WaitForChanges(1, 
"/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API + assert.Equal(t, "doc", changes.Results[0].ID) + assert.Equal(t, markerID, changes.Results[1].ID) - messages := btc.pullReplication.GetMessages() + err = btcRunner.StartOneshotPullFiltered(btc.id, "A") + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, markerID, markerVersion) + assert.True(t, ok) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 1) - require.Len(t, messageBody[0], 3) // marker doc - require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 1) + require.Len(t, messageBody[0], 3) // marker doc + require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) + }) } // Make sure that a client cannot open multiple subChanges subscriptions on a single blip context (SG #3222) @@ -2361,54 +2383,58 @@ func TestBlipInternalPropertiesHandling(t *testing.T) { }, } - // Setup - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + // Setup + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + }) + defer rt.Close() - // Track last sequence for next changes feed - var changes ChangesResults - changes.Last_Seq = "0" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - for i, test := range testCases { - t.Run(test.name, func(t *testing.T) { - docID := fmt.Sprintf("test%d", i) - rawBody, err := json.Marshal(test.inputBody) - require.NoError(t, err) + // Track last sequence for next changes feed + var changes ChangesResults + changes.Last_Seq = "0" - _, err = client.PushRev(docID, EmptyDocVersion(), rawBody) + for i, test := range testCases { + t.Run(test.name, func(t *testing.T) { + docID := fmt.Sprintf("test%d", i) + rawBody, err := json.Marshal(test.inputBody) + require.NoError(t, err) - if test.expectReject { - assert.Error(t, err) - return - } - assert.NoError(t, err) - - // Wait for rev to be received on RT - err = 
rt.WaitForPendingChanges() - require.NoError(t, err) - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) - require.NoError(t, err) + _, err = btcRunner.PushRev(client.id, docID, EmptyDocVersion(), rawBody) - var bucketDoc map[string]interface{} - _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) - assert.NoError(t, err) - body := rt.GetDocBody(docID) - // Confirm input body is in the bucket doc - if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { - for k, v := range test.inputBody { - assert.Equal(t, v, bucketDoc[k]) - assert.Equal(t, v, body[k]) + if test.expectReject { + assert.Error(t, err) + return } - } - }) - } + assert.NoError(t, err) + + // Wait for rev to be received on RT + err = rt.WaitForPendingChanges() + require.NoError(t, err) + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) + require.NoError(t, err) + + var bucketDoc map[string]interface{} + _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) + assert.NoError(t, err) + body := rt.GetDocBody(docID) + // Confirm input body is in the bucket doc + if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { + for k, v := range test.inputBody { + assert.Equal(t, v, bucketDoc[k]) + assert.Equal(t, v, body[k]) + } + } + }) + } + }) } // CBG-2053: Test that the handleRev stats still increment correctly when going through the processRev function with @@ -2541,120 +2567,129 @@ func TestSendRevisionNoRevHandling(t *testing.T) { expectNoRev: false, }, } - for _, test := range testCases { - t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { - docName := fmt.Sprintf("%s", test.error) - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), - }) - defer rt.Close() - - leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) - require.True(t, ok) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - // Change noRev handler so it's known when a noRev is received - recievedNoRevs := make(chan *blip.Message) - btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { - fmt.Println("Received noRev", msg.Properties) - recievedNoRevs <- msg - } - - version := rt.PutDoc(docName, `{"foo":"bar"}`) - - // Make the LeakyBucket return an error - leakyDataStore.SetGetRawCallback(func(key string) error { - return test.error - }) - leakyDataStore.SetGetWithXattrCallback(func(key string) error { - return test.error - }) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + for _, test := range testCases { + t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { + docName := fmt.Sprintf("%s", test.error) + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), + }) + defer rt.Close() + + leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) + require.True(t, ok) + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + // Change noRev handler so it's known when a noRev is received + recievedNoRevs := make(chan *blip.Message) + 
btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { + fmt.Println("Received noRev", msg.Properties) + recievedNoRevs <- msg + } - // Flush cache so document has to be retrieved from the leaky bucket - rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + version := rt.PutDoc(docName, `{"foo":"bar"}`) - err = btc.StartPull() - require.NoError(t, err) + // Make the LeakyBucket return an error + leakyDataStore.SetGetRawCallback(func(key string) error { + return test.error + }) + leakyDataStore.SetGetWithXattrCallback(func(key string) error { + return test.error + }) - // Wait 3 seconds for noRev to be received - select { - case msg := <-recievedNoRevs: - if test.expectNoRev { - assert.Equal(t, docName, msg.Properties["id"]) - } else { - require.Fail(t, "Received unexpected noRev message", msg) - } - case <-time.After(3 * time.Second): - if test.expectNoRev { - require.Fail(t, "Didn't receive expected noRev") + // Flush cache so document has to be retrieved from the leaky bucket + rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + + err := btcRunner.StartPull(btc.id) + require.NoError(t, err) + + // Wait 3 seconds for noRev to be received + select { + case msg := <-recievedNoRevs: + if test.expectNoRev { + assert.Equal(t, docName, msg.Properties["id"]) + } else { + require.Fail(t, "Received unexpected noRev message", msg) + } + case <-time.After(3 * time.Second): + if test.expectNoRev { + require.Fail(t, "Didn't receive expected noRev") + } } - } - // Make sure document did not get replicated - _, found := btc.GetVersion(docName, version) - assert.False(t, found) - }) - } + // Make sure document did not get replicated + _, found := btcRunner.GetVersion(btc.id, docName, version) + assert.False(t, found) + }) + } + }) } func TestUnsubChanges(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - // Confirm no error message or panic is returned in response - response, err := btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) - - // Sub changes - err = btc.StartPull() - require.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1ID" doc2ID = "doc2ID" ) - doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) - _, found := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, found) - activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous - require.EqualValues(t, 1, activeReplStat.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Unsub changes - response, err = btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) - // Wait for unsub changes to stop the sub changes being sent before sending document up - base.RequireWaitForStat(t, activeReplStat.Value, 0) - - // Confirm no more changes are being sent - doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) - err = rt.WaitForConditionWithOptions(func() bool { - _, found = btc.GetVersion("doc2", doc2Version) - return found - }, 10, 100) - assert.Error(t, err) - - // Confirm no error message is still returned when no subchanges active - response, err = btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) + 
opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + // Confirm no error message or panic is returned in response + response, err := btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) - // Confirm the pull replication can be restarted and it syncs doc2 - err = btc.StartPull() - require.NoError(t, err) - _, found = btc.WaitForVersion(doc2ID, doc2Version) - assert.True(t, found) + // Sub changes + err = btcRunner.StartPull(btc.id) + require.NoError(t, err) + + doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, found) + + activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous + require.EqualValues(t, 1, activeReplStat.Value()) + + // Unsub changes + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + // Wait for unsub changes to stop the sub changes being sent before sending document up + base.RequireWaitForStat(t, activeReplStat.Value, 0) + + // Confirm no more changes are being sent + doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) + err = rt.WaitForConditionWithOptions(func() bool { + _, found = btcRunner.GetVersion(btc.id, "doc2", doc2Version) + return found + }, 10, 100) + assert.Error(t, err) + + // Confirm no error message is still returned when no subchanges active + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + + // Confirm the pull replication can be restarted and it syncs doc2 + err = btcRunner.StartPull(btc.id) + require.NoError(t, err) + _, found = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + assert.True(t, found) + }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the replication. 
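The conversion applied throughout this file has one shape: the RestTester and BLIP client setup move inside btcRunner.Run, which executes the body once per supported BLIP protocol, and all client operations are routed through the runner keyed by the client id. A minimal sketch of that shape, using only helpers that appear in the surrounding hunks (the test name itself is hypothetical):

func TestRunnerPatternSketch(t *testing.T) {
	btcRunner := NewBlipTesterClientRunner(t)
	btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
		// RestTester is created inside the closure so each protocol subtest gets fresh state.
		rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true})
		defer rt.Close()

		// The protocol list supplied by the runner is passed through the client opts.
		opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols}
		btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts)
		defer btc.Close()

		// Client operations go through the runner, keyed by the client id.
		require.NoError(t, btcRunner.StartPull(btc.id))
	})
}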
@@ -2671,47 +2706,49 @@ func TestRequestPlusPull(t *testing.T) { } }`, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() - - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() + + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Write a document granting user 'bernard' access to PBS - response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Write a document granting user 'bernard' access to PBS + response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Start a regular one-shot pull - err = client.StartOneshotPullRequestPlus() - assert.NoError(t, err) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPullRequestPlus(client.id) + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the db config. 
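Both request-plus tests rest on the same reasoning: a sequence allocated without a corresponding document write keeps the changes feed reporting pending changes, so a pull with request plus enabled cannot complete until that sequence is released. Waiting for NumPullReplTotalCaughtUp before releasing is what proves the pull actually waited rather than winning a race. A condensed skeleton of that flow, taken from the hunk above (the db-config variant below differs only in using StartOneshotPull and relying on the database setting):

	// Allocate a sequence with no document behind it; changes stay pending until it is released.
	slowSequence, seqErr := db.AllocateTestSequence(database)
	require.NoError(t, seqErr)

	caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value()

	// Start the one-shot pull with request plus set on the replication.
	require.NoError(t, btcRunner.StartOneshotPullRequestPlus(client.id))

	// Ensure the changes feed is in wait mode before releasing the slow sequence.
	require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1))
	require.NoError(t, db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence))

	// Only after the release should the pull unblock and deliver the doc in the granted channel.
	data, ok := btcRunner.WaitForDoc(client.id, "pbs-1")
	assert.True(t, ok)
	assert.Equal(t, `{"channel":["PBS"]}`, string(data))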
@@ -2733,47 +2770,50 @@ func TestRequestPlusPullDbConfig(t *testing.T) { }, }, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Write a document granting user 'bernard' access to PBS - response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Write a document granting user 'bernard' access to PBS + response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Start a regular one-shot pull - err = client.StartOneshotPull() - assert.NoError(t, err) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPull(client.id) + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } // TestBlipRefreshUser makes sure there is no panic if a user gets deleted during a replication @@ -2794,53 +2834,56 @@ func TestBlipRefreshUser(t *testing.T) { rtConfig := RestTesterConfig{ 
SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - const username = "bernard" - // Initialize blip tester client (will create user) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - Channels: []string{"chan1"}, - }) + const docID = "doc1" - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + const username = "bernard" + // Initialize blip tester client (will create user) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + Channels: []string{"chan1"}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // add chan1 explicitly - response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) - RequireStatus(t, response, http.StatusOK) + // add chan1 explicitly + response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) + RequireStatus(t, response, http.StatusOK) - const docID = "doc1" - version := rt.PutDoc(docID, `{"channels":["chan1"]}`) + version := rt.PutDoc(docID, `{"channels":["chan1"]}`) - // Start a regular one-shot pull - err = btc.StartPullSince("true", "0", "false") - require.NoError(t, err) + // Start a regular one-shot pull + err := btcRunner.StartPullSince(btc.id, "true", "0", "false") + require.NoError(t, err) - _, ok := btc.WaitForDoc(docID) - require.True(t, ok) + _, ok := btcRunner.WaitForDoc(btc.id, docID) + require.True(t, ok) - _, ok = btc.GetVersion(docID, version) - require.True(t, ok) + _, ok = btcRunner.GetVersion(btc.id, docID, version) + require.True(t, ok) - // delete user with an active blip connection - response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") - RequireStatus(t, response, http.StatusOK) + // delete user with an active blip connection + response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") + RequireStatus(t, response, http.StatusOK) - require.NoError(t, rt.WaitForPendingChanges()) + require.NoError(t, rt.WaitForPendingChanges()) - // further requests will 500, but shouldn't panic - unsubChangesRequest := blip.NewRequest() - unsubChangesRequest.SetProfile(db.MessageUnsubChanges) - btc.addCollectionProperty(unsubChangesRequest) + // further requests will 500, but shouldn't panic + unsubChangesRequest := blip.NewRequest() + unsubChangesRequest.SetProfile(db.MessageUnsubChanges) + btc.addCollectionProperty(unsubChangesRequest) - err = btc.pullReplication.sendMsg(unsubChangesRequest) - require.NoError(t, err) + err = btc.pullReplication.sendMsg(unsubChangesRequest) + require.NoError(t, err) - testResponse := unsubChangesRequest.Response() - require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) - body, err := testResponse.Body() - require.NoError(t, err) - require.NotContains(t, string(body), "Panic:") + testResponse := unsubChangesRequest.Response() + require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) + body, err := testResponse.Body() + require.NoError(t, err) + require.NotContains(t, string(body), 
"Panic:") + }) } diff --git a/rest/blip_api_delta_sync_test.go b/rest/blip_api_delta_sync_test.go index 74651c909e..1bb5e7aaaa 100644 --- a/rest/blip_api_delta_sync_test.go +++ b/rest/blip_api_delta_sync_test.go @@ -30,60 +30,63 @@ func TestBlipDeltaSyncPushAttachment(t *testing.T) { if !base.IsEnterpriseEdition() { t.Skip("Delta test requires EE") } + rtConfig := &RestTesterConfig{ + DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ + DeltaSync: &DeltaSyncConfig{ + Enabled: base.BoolPtr(true), + }, + }}, + GuestEnabled: true, + } const docID = "pushAttachmentDoc" - rt := NewRestTester(t, - &RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - DeltaSync: &DeltaSyncConfig{ - Enabled: base.BoolPtr(true), - }, - }}, - GuestEnabled: true, - }) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Push first rev - version, err := btc.PushRev(docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) - require.NoError(t, err) + // Push first rev + version, err := btcRunner.PushRev(btc.id, docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) + require.NoError(t, err) - // Push second rev with an attachment (no delta yet) - attData := base64.StdEncoding.EncodeToString([]byte("attach")) + // Push second rev with an attachment (no delta yet) + attData := base64.StdEncoding.EncodeToString([]byte("attach")) - version, err = btc.PushRev(docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) - require.NoError(t, err) + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) + require.NoError(t, err) - syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found := syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found := syncData.Attachments["myAttachment"] + assert.True(t, found) - // Turn deltas on - btc.ClientDeltas = true + // Turn deltas on + btc.ClientDeltas = true - // Get existing body with the stub attachment, insert a new property and push as delta. - body, found := btc.GetVersion(docID, version) - require.True(t, found) + // Get existing body with the stub attachment, insert a new property and push as delta. 
+ body, found := btcRunner.GetVersion(btc.id, docID, version) + require.True(t, found) - newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) - require.NoError(t, err) + newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) + require.NoError(t, err) - _, err = btc.PushRev(docID, version, newBody) - require.NoError(t, err) + _, err = btcRunner.PushRev(btc.id, docID, version, newBody) + require.NoError(t, err) - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found = syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found = syncData.Attachments["myAttachment"] + assert.True(t, found) + }) } // Test pushing and pulling new attachments through delta sync @@ -106,59 +109,63 @@ func TestBlipDeltaSyncPushPullNewAttachment(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btc.ClientDeltas = true - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client by adding another attachment. 
- bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 2) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - world, ok := attachments["world.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) - assert.Equal(t, float64(11), world["length"]) - assert.Equal(t, float64(2), world["revpos"]) - assert.Equal(t, true, world["stub"]) + btc.ClientDeltas = true + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + const docID = "doc1" + + // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client by adding another attachment. 
+ bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 2) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + world, ok := attachments["world.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) + assert.Equal(t, float64(11), world["length"]) + assert.Equal(t, float64(2), world["revpos"]) + assert.Equal(t, true, world["stub"]) + }) } // TestBlipDeltaSyncNewAttachmentPull tests that adding a new attachment in SG and replicated via delta sync adds the attachment @@ -175,84 +182,88 @@ func TestBlipDeltaSyncNewAttachmentPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const doc1ID = "doc1" - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - const doc1ID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment - version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) - - data, ok = client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - var dataMap map[string]interface{} - assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) - atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, atts, 1) - hello, ok := atts["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // message #3 is the getAttachment message that is sent in-between rev processing - msg, ok := client.pullReplication.WaitForMessage(3) - assert.True(t, ok) - 
assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") - - // Check EE is delta, and CE is full-body replication - // msg, ok = client.pullReplication.WaitForMessage(5) - msg, ok = client.WaitForBlipRevMessage(doc1ID, version) - assert.True(t, ok) - - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) - assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) - } - respBody := rt.GetDocVersion(doc1ID, version) - assert.Equal(t, doc1ID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 2) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - atts = respBody[db.BodyAttachments].(map[string]interface{}) - assert.Len(t, atts, 1) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment + version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) + + data, ok = btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + var dataMap map[string]interface{} + assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) + atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, atts, 1) + hello, ok := atts["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // message #3 is the getAttachment message that is sent in-between rev processing + 
msg, ok := client.pullReplication.WaitForMessage(3) + assert.True(t, ok) + assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") + + // Check EE is delta, and CE is full-body replication + // msg, ok = client.pullReplication.WaitForMessage(5) + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, doc1ID, version) + assert.True(t, ok) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) + assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) + } + + respBody := rt.GetDocVersion(doc1ID, version) + assert.Equal(t, doc1ID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 2) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + atts = respBody[db.BodyAttachments].(map[string]interface{}) + assert.Len(t, atts, 1) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + }) } // TestBlipDeltaSyncPull tests that a simple pull replication uses deltas in EE, @@ -262,7 +273,7 @@ func TestBlipDeltaSyncPull(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -270,66 +281,67 @@ func TestBlipDeltaSyncPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - var deltaSentCount int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, 
`{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version = rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + var deltaSentCount int64 + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - // Check EE is delta, and CE is full-body replication - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - var afterDeltaSyncCount int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version = rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version) + assert.True(t, ok) + + // Check EE is delta, and CE is full-body replication + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, 
rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + + var afterDeltaSyncCount int64 + if rt.GetDatabase().DbStats.DeltaSync() != nil { + afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + assert.Equal(t, deltaSentCount, afterDeltaSyncCount) } - - assert.Equal(t, deltaSentCount, afterDeltaSyncCount) - } + }) } // TestBlipDeltaSyncPullResend tests that a simple pull replication that uses a delta a client rejects will resend the revision in full. @@ -349,58 +361,61 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - docID := "doc1" - // create doc1 rev 1 - docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - // reject deltas built ontop of rev 1 - client.rejectDeltasForSrcRev = docVersion1.RevID - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - data, ok := client.WaitForVersion(docID, docVersion1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2 - docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = client.WaitForVersion(docID, docVersion2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - - // Check the request was initially sent with the correct deltaSrc property - assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - - msg, ok = client.WaitForBlipRevMessage(docID, docVersion2) - assert.True(t, ok) - - // Check the resent request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err = msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + + docID := "doc1" + // create doc1 rev 1 + docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + + opts := 
&BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + + // reject deltas built ontop of rev 1 + client.rejectDeltasForSrcRev = docVersion1.RevID + + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + data, ok := btcRunner.WaitForVersion(client.id, docID, docVersion1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2 + docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, docVersion2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + + // Check the request was initially sent with the correct deltaSrc property + assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, docID, docVersion2) + assert.True(t, ok) + + // Check the resent request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err = msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullRemoved tests a simple pull replication that drops a document out of the user's channel. 
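The resend test above relies on a client-side knob rather than server configuration: setting rejectDeltasForSrcRev to rev 1 makes the test client refuse deltas built on top of that revision (per the inline comment), after which Sync Gateway is expected to resend the revision in full, with no deltaSrc property and a non-delta body. A minimal sketch of just that setup, using the fields and helpers from the hunk above:

	// Reject any delta computed against rev 1; the next rev must then arrive as a full body.
	client.rejectDeltasForSrcRev = docVersion1.RevID
	client.ClientDeltas = true
	require.NoError(t, btcRunner.StartPull(client.id))

	// The resent rev message should carry no deltaSrc and the full (non-delta) body.
	msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, docVersion2)
	require.True(t, ok)
	assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc])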
@@ -419,43 +434,47 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer client.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test + const docID = "doc1" - err = client.StartPull() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() - const docID = "doc1" - // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 - version = rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"_removed":true}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_removed":true}`, string(msgBody)) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer client.Close() + + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 + version = rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"_removed":true}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_removed":true}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullTombstoned tests a simple pull replication that deletes a document. 
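Not every test takes its protocol list from the runner: TestBlipDeltaSyncPullRemoved above pins SupportedBLIPProtocols to db.BlipCBMobileReplicationV2 and sets SkipVersionVectorInitialization on the runner, which the diff flags as a v2 protocol test. A minimal sketch of that opt-out shape; the test name is hypothetical and the field's meaning is taken only from the inline comment above:

func TestV2OnlyPullSketch(t *testing.T) {
	btcRunner := NewBlipTesterClientRunner(t)
	btcRunner.SkipVersionVectorInitialization = true // per the diff comment: this test targets the v2 protocol only

	btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
		rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true})
		defer rt.Close()

		// The runner-supplied protocol list is deliberately ignored; the client pins v2.
		client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{
			ClientDeltas:           true,
			SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2},
		})
		defer client.Close()

		require.NoError(t, btcRunner.StartPull(client.id))
	})
}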
@@ -473,7 +492,7 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -483,78 +502,82 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - err = client.StartPull() - assert.NoError(t, err) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{}`, string(msgBody)) - assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) - - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, 
deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } + const docID = "doc1" + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{}`, string(msgBody)) + assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullTombstonedStarChan tests two clients can perform a simple pull replication that deletes a document when the user has access to the star channel. 
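The tombstone tests on either side of this point share the same stat-accounting frame once inside the runner closure: snapshot the DeltaSync stats behind a nil check (the stat block can be absent when delta sync is disabled), run the pull, snapshot again, and branch the expectations on sgUseDeltas. Condensed from the hunks above; the exact expected counts differ per test:

	var deltasRequestedStart, deltasSentStart int64
	if rt.GetDatabase().DbStats.DeltaSync() != nil {
		deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value()
		deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()
	}

	// ... pull the tombstoned revision ...

	var deltasRequestedEnd, deltasSentEnd int64
	if rt.GetDatabase().DbStats.DeltaSync() != nil {
		deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value()
		deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()
	}

	if sgUseDeltas {
		// EE: a delta is requested for the tombstone (the +1 matches the single pull in the hunk above).
		assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd)
		assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta (comment from the hunk above)
	} else {
		// CE: full-body replication, so none of the counters move.
		assert.Equal(t, deltasRequestedStart, deltasRequestedEnd)
		assert.Equal(t, deltasSentStart, deltasSentEnd)
	}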
@@ -576,129 +599,133 @@ func TestBlipDeltaSyncPullTombstonedStarChan(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyHTTP, base.KeyCache, base.KeySync, base.KeySyncMsg) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + rtConfig := &RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - var deltaCacheHitsStart int64 - var deltaCacheMissesStart int64 - var deltasRequestedStart int64 - var deltasSentStart int64 + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - client1, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client1", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client1.Close() + var deltaCacheHitsStart int64 + var deltaCacheMissesStart int64 + var deltasRequestedStart int64 + var deltasSentStart int64 - client2, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client2", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client2.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client1", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client1.Close() - err = client1.StartPull() - require.NoError(t, err) + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client2", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client2.Close() - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // Have client2 get only rev-1 and then stop replicating - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = 
client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok := client1.WaitForBlipRevMessage(docID, version) // docid, revid to get the message - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - msgBody, err := msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } + err := btcRunner.StartPull(client1.id) + require.NoError(t, err) - // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok = client2.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - msgBody, err = msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 + data, ok := btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + // Have client2 get only rev-1 and then stop replicating + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client1.id, docID, version) // docid, revid to get the message + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), 
client1.pullReplication.GetMessages()) + } + msgBody, err := msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) - assert.Equal(t, deltasSentStart+2, deltasSentEnd) - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } + // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok = btcRunner.WaitForBlipRevMessage(client2.id, docID, version) + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + msgBody, err = msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) + assert.Equal(t, deltasSentStart+2, deltasSentEnd) + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullRevCache tests that a simple pull replication uses deltas in EE, @@ -720,79 +747,80 @@ func TestBlipDeltaSyncPullRevCache(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: 
SupportedBLIPProtocols} - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // Perform a one-shot pull as client 2 to pull down the first revision - - client2, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client2.Close() - - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check EE is delta - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - - deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - msg2, ok := client2.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody2, err := msg2.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) - - updatedDeltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) - assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // Perform a one-shot pull as client 2 to pull down the first revision + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client2.Close() + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = 
btcRunner.WaitForVersion(client2.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version2) + assert.True(t, ok) + + // Check EE is delta + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + msg2, ok := btcRunner.WaitForBlipRevMessage(client2.id, docID, version2) + assert.True(t, ok) + + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody2, err := msg2.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) + + updatedDeltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) + assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + }) } // TestBlipDeltaSyncPush tests that a simple push replication handles deltas in EE, @@ -809,96 +837,100 @@ func TestBlipDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true - - err = client.StartPull() - assert.NoError(t, err) - - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abc on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + 
client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client.ClientDeltas = true - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - // Validate that generation of a delta didn't mutate the revision body in the revision cache - docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) - assert.NoError(t, cacheErr) - assert.NotContains(t, docRev.BodyBytes, "bob") - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abc on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - } - respBody := rt.GetDocVersion(docID, newRev) - assert.Equal(t, "doc1", respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 3) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + // Validate that generation of a delta didn't mutate the revision body in the revision cache + docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) + assert.NoError(t, cacheErr) + assert.NotContains(t, docRev.BodyBytes, "bob") + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + } - // tombstone doc1 (gets rev 
3-f3be6c85e0362153005dae6f08fc68bb) - deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) + respBody := rt.GetDocVersion(docID, newRev) + assert.Equal(t, "doc1", respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 3) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) - data, ok = client.WaitForVersion(docID, deletedVersion) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) + // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) + deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) - var deltaPushDocCountStart int64 + data, ok = btcRunner.WaitForVersion(client.id, docID, deletedVersion) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } + var deltaPushDocCountStart int64 - _, err = client.PushRev(docID, deletedVersion, []byte(`{"undelete":true}`)) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } - if base.IsEnterpriseEdition() { - // Now make the client push up a delta that has the parent of the tombstone. - // This is not a valid scenario, and is actively prevented on the CBL side. - assert.Error(t, err) - assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") - } else { - // Pushing a full body revision on top of a tombstone is valid. - // CBL clients should fall back to this. The test client doesn't. - assert.NoError(t, err) - } + _, err = btcRunner.PushRev(client.id, docID, deletedVersion, []byte(`{"undelete":true}`)) + + if base.IsEnterpriseEdition() { + // Now make the client push up a delta that has the parent of the tombstone. + // This is not a valid scenario, and is actively prevented on the CBL side. + assert.Error(t, err) + assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") + } else { + // Pushing a full body revision on top of a tombstone is valid. + // CBL clients should fall back to this. The test client doesn't. 
+ assert.NoError(t, err) + } - var deltaPushDocCountEnd int64 + var deltaPushDocCountEnd int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } - assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } + assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + }) } // TestBlipNonDeltaSyncPush tests that a client that doesn't support deltas can push to a SG that supports deltas (either CE or EE) @@ -914,41 +946,45 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - client.ClientDeltas = false - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abcxyz on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) - - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - - body := rt.GetDocVersion("doc1", newRev) - require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + client.ClientDeltas = false + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abcxyz on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + assert.NoError(t, err) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + 
// Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + + body := rt.GetDocVersion("doc1", newRev) + require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + }) } diff --git a/rest/blip_api_no_race_test.go b/rest/blip_api_no_race_test.go index f6e35f9cf1..70688eb559 100644 --- a/rest/blip_api_no_race_test.go +++ b/rest/blip_api_no_race_test.go @@ -44,65 +44,68 @@ func TestBlipPusherUpdateDatabase(t *testing.T) { GuestEnabled: true, CustomTestBucket: tb.NoCloseClone(), } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - var lastPushRevErr atomic.Value - - // Wait for the background updates to finish at the end of the test - shouldCreateDocs := base.NewAtomicBool(true) - wg := sync.WaitGroup{} - wg.Add(1) - defer func() { - shouldCreateDocs.Set(false) - wg.Wait() - }() - - // Start the test client creating and pushing documents in the background - go func() { - for i := 0; shouldCreateDocs.IsTrue(); i++ { - // this will begin to error when the database is reloaded underneath the replication - _, err := client.PushRev(fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) - if err != nil { - lastPushRevErr.Store(err) - } - } - _ = rt.WaitForPendingChanges() - wg.Done() - }() - - // and wait for a few to be done before we proceed with updating database config underneath replication - _, err = rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) - require.NoError(t, err) - - // just change the sync function to cause the database to reload - dbConfig := *rt.ServerContext().GetDbConfig("db") - dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) - resp := rt.ReplaceDbConfig("db", dbConfig) - RequireStatus(t, resp, http.StatusCreated) - - // Did we tell the client to close the connection (via HTTP/503)? - // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
- WaitAndAssertCondition(t, func() bool { - lastErr, ok := lastPushRevErr.Load().(error) - if !ok { - return false - } - if lastErr == nil { - return false - } - lastErrMsg := lastErr.Error() - if !strings.Contains(lastErrMsg, "HTTP 503") { - return false - } - if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { - return false - } - return true - }, "expected HTTP 503 error") + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + + var lastPushRevErr atomic.Value + + // Wait for the background updates to finish at the end of the test + shouldCreateDocs := base.NewAtomicBool(true) + wg := sync.WaitGroup{} + wg.Add(1) + defer func() { + shouldCreateDocs.Set(false) + wg.Wait() + }() + + // Start the test client creating and pushing documents in the background + go func() { + for i := 0; shouldCreateDocs.IsTrue(); i++ { + // this will begin to error when the database is reloaded underneath the replication + _, err := btcRunner.PushRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) + if err != nil { + lastPushRevErr.Store(err) + } + } + _ = rt.WaitForPendingChanges() + wg.Done() + }() + + // and wait for a few to be done before we proceed with updating database config underneath replication + _, err := rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) + require.NoError(t, err) + + // just change the sync function to cause the database to reload + dbConfig := *rt.ServerContext().GetDbConfig("db") + dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) + resp := rt.ReplaceDbConfig("db", dbConfig) + RequireStatus(t, resp, http.StatusCreated) + + // Did we tell the client to close the connection (via HTTP/503)? + // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
+ WaitAndAssertCondition(t, func() bool { + lastErr, ok := lastPushRevErr.Load().(error) + if !ok { + return false + } + if lastErr == nil { + return false + } + lastErrMsg := lastErr.Error() + if !strings.Contains(lastErrMsg, "HTTP 503") { + return false + } + if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { + return false + } + return true + }, "expected HTTP 503 error") + }) } diff --git a/rest/blip_client_test.go b/rest/blip_client_test.go index f3e808aae0..c663a14f6a 100644 --- a/rest/blip_client_test.go +++ b/rest/blip_client_test.go @@ -46,6 +46,7 @@ type BlipTesterClientOpts struct { type BlipTesterClient struct { BlipTesterClientOpts + id uint32 // unique ID for the client rt *RestTester pullReplication *BlipTesterReplicator // SG -> CBL replications pushReplication *BlipTesterReplicator // CBL -> SG replications @@ -69,6 +70,14 @@ type BlipTesterCollectionClient struct { lastReplicatedRevLock sync.RWMutex // lock for lastReplicatedRev map } +// BlipTestClientRunner is for running the blip tester client and its associated methods in test framework +type BlipTestClientRunner struct { + clients map[uint32]*BlipTesterClient // map of created BlipTesterClient's + t *testing.T + initialisedInsideRunnerCode bool // flag to check that the BlipTesterClient is being initialised in the correct area (inside the Run() method) + SkipVersionVectorInitialization bool // used to skip the version vector subtest +} + type BodyMessagePair struct { body []byte message *blip.Message @@ -85,6 +94,14 @@ type BlipTesterReplicator struct { replicationStats *db.BlipSyncStats // Stats of replications } +// NewBlipTesterClientRunner creates a BlipTestClientRunner type +func NewBlipTesterClientRunner(t *testing.T) *BlipTestClientRunner { + return &BlipTestClientRunner{ + t: t, + clients: make(map[uint32]*BlipTesterClient), + } +} + func (btr *BlipTesterReplicator) Close() { btr.bt.Close() btr.messagesLock.Lock() @@ -571,33 +588,80 @@ func getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { return collections } -func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { +func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { + if !btcRunner.initialisedInsideRunnerCode { + btcRunner.t.Fatalf("must initialise BlipTesterClient inside Run() method") + } if opts == nil { opts = &BlipTesterClientOpts{} } - btc := BlipTesterClient{ + id, err := uuid.NewRandom() + require.NoError(btcRunner.t, err) + + client = &BlipTesterClient{ BlipTesterClientOpts: *opts, rt: rt, + id: id.ID(), + } + btcRunner.clients[client.id] = client + err = client.createBlipTesterReplications() + require.NoError(btcRunner.t, err) + + return client +} + +func (btc *BlipTesterClient) Close() { + btc.tearDownBlipClientReplications() + for _, collectionClient := range btc.collectionClients { + collectionClient.Close() } + if btc.nonCollectionAwareClient != nil { + btc.nonCollectionAwareClient.Close() + } +} + +func (btcRunner *BlipTestClientRunner) Run(test func(t *testing.T, SupportedBLIPProtocols []string)) { + btcRunner.initialisedInsideRunnerCode = true + // reset to protect against someone creating a new client after Run() is run + defer func() { btcRunner.initialisedInsideRunnerCode = false }() + btcRunner.t.Run("revTree", func(t *testing.T) { + test(t, []string{db.BlipCBMobileReplicationV3}) + }) + // if test is not wanting 
version vector subprotocol to be run, return before we start this subtest + if btcRunner.SkipVersionVectorInitialization { + return + } + btcRunner.t.Run("versionVector", func(t *testing.T) { + t.Skip("skip VV subtest on master") + // bump sub protocol version here and pass into test function pending CBG-3253 + test(t, nil) + }) +} +func (btc *BlipTesterClient) tearDownBlipClientReplications() { + btc.pullReplication.Close() + btc.pushReplication.Close() +} + +func (btc *BlipTesterClient) createBlipTesterReplications() error { id, err := uuid.NewRandom() if err != nil { - return nil, err + return err } - if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - collections := getCollectionsForBLIP(tb, rt) - if !opts.SkipCollectionsInitialization && len(collections) > 0 { + collections := getCollectionsForBLIP(btc.rt.TB, btc.rt) + if !btc.BlipTesterClientOpts.SkipCollectionsInitialization && len(collections) > 0 { btc.collectionClients = make([]*BlipTesterCollectionClient, len(collections)) for i, collection := range collections { if err := btc.initCollectionReplication(collection, i); err != nil { - return nil, err + return err } } } else { @@ -605,40 +669,14 @@ func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterC docs: make(map[string]map[string]*BodyMessagePair), attachments: make(map[string][]byte), lastReplicatedRev: make(map[string]string), - parent: &btc, + parent: btc, } - } - return &btc, nil -} - -// NewBlipTesterClient returns a client which emulates the behaviour of a CBL client over BLIP. -func NewBlipTesterClient(tb testing.TB, rt *RestTester) (client *BlipTesterClient, err error) { - return createBlipTesterClientOpts(tb, rt, nil) -} - -func NewBlipTesterClientOptsWithRT(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { - client, err = createBlipTesterClientOpts(tb, rt, opts) - if err != nil { - return nil, err - } - - client.pullReplication.bt.avoidRestTesterClose = true - client.pushReplication.bt.avoidRestTesterClose = true - - return client, nil -} + btc.pullReplication.bt.avoidRestTesterClose = true + btc.pushReplication.bt.avoidRestTesterClose = true -func (btc *BlipTesterClient) Close() { - btc.pullReplication.Close() - btc.pushReplication.Close() - for _, collectionClient := range btc.collectionClients { - collectionClient.Close() - } - if btc.nonCollectionAwareClient != nil { - btc.nonCollectionAwareClient.Close() - } + return nil } func (btc *BlipTesterClient) initCollectionReplication(collection string, collectionIdx int) error { @@ -668,25 +706,25 @@ func (btc *BlipTesterClient) waitForReplicationMessage(collection *db.DatabaseCo } // SingleCollection returns a single collection blip tester if the RestTester database is configured with only one collection. Otherwise, throw a fatal test error. 
-func (btc *BlipTesterClient) SingleCollection() *BlipTesterCollectionClient { - if btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) SingleCollection(clientID uint32) *BlipTesterCollectionClient { + if btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - require.Equal(btc.rt.TB, 1, len(btc.collectionClients)) - return btc.collectionClients[0] + require.Equal(btcRunner.clients[clientID].rt.TB, 1, len(btcRunner.clients[clientID].collectionClients)) + return btcRunner.clients[clientID].collectionClients[0] } // Collection return a collection blip tester by name, if configured in the RestTester database. Otherwise, throw a fatal test error. -func (btc *BlipTesterClient) Collection(collectionName string) *BlipTesterCollectionClient { - if collectionName == "_default._default" && btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) Collection(clientID uint32, collectionName string) *BlipTesterCollectionClient { + if collectionName == "_default._default" && btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - for _, collectionClient := range btc.collectionClients { + for _, collectionClient := range btcRunner.clients[clientID].collectionClients { if collectionClient.collection == collectionName { return collectionClient } } - btc.rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) + btcRunner.clients[clientID].rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) return nil } @@ -1126,81 +1164,81 @@ func (btc *BlipTesterCollectionClient) GetBlipRevMessage(docID, revID string) (m return nil, false } -func (btc *BlipTesterClient) StartPull() error { - return btc.SingleCollection().StartPull() +func (btcRunner *BlipTestClientRunner) StartPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartPull() } // WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. 
-func (btc *BlipTesterClient) WaitForVersion(docID string, docVersion DocVersion) (data []byte, found bool) { - return btc.SingleCollection().WaitForVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForVersion(clientID uint32, docID string, docVersion DocVersion) (data []byte, found bool) { + return btcRunner.SingleCollection(clientID).WaitForVersion(docID, docVersion) } -func (btc *BlipTesterClient) WaitForDoc(docID string) ([]byte, bool) { - return btc.SingleCollection().WaitForDoc(docID) +func (btcRunner *BlipTestClientRunner) WaitForDoc(clientID uint32, docID string) ([]byte, bool) { + return btcRunner.SingleCollection(clientID).WaitForDoc(docID) } -func (btc *BlipTesterClient) WaitForBlipRevMessage(docID string, docVersion DocVersion) (*blip.Message, bool) { - return btc.SingleCollection().WaitForBlipRevMessage(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForBlipRevMessage(clientID uint32, docID string, docVersion DocVersion) (*blip.Message, bool) { + return btcRunner.SingleCollection(clientID).WaitForBlipRevMessage(docID, docVersion) } -func (btc *BlipTesterClient) StartOneshotPull() error { - return btc.SingleCollection().StartOneshotPull() +func (btcRunner *BlipTestClientRunner) StartOneshotPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPull() } -func (btc *BlipTesterClient) StartOneshotPullFiltered(channels string) error { - return btc.SingleCollection().StartOneshotPullFiltered(channels) +func (btcRunner *BlipTestClientRunner) StartOneshotPullFiltered(clientID uint32, channels string) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullFiltered(channels) } -func (btc *BlipTesterClient) StartOneshotPullRequestPlus() error { - return btc.SingleCollection().StartOneshotPullRequestPlus() +func (btcRunner *BlipTestClientRunner) StartOneshotPullRequestPlus(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullRequestPlus() } -func (btc *BlipTesterClient) PushRev(docID string, version DocVersion, body []byte) (DocVersion, error) { - return btc.SingleCollection().PushRev(docID, version, body) +func (btcRunner *BlipTestClientRunner) PushRev(clientID uint32, docID string, version DocVersion, body []byte) (DocVersion, error) { + return btcRunner.SingleCollection(clientID).PushRev(docID, version, body) } -func (btc *BlipTesterClient) StartPullSince(continuous, since, activeOnly string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, "", "") +func (btcRunner *BlipTestClientRunner) StartPullSince(clientID uint32, continuous, since, activeOnly string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, "", "") } -func (btc *BlipTesterClient) StartFilteredPullSince(continuous, since, activeOnly string, channels string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, channels, "") +func (btcRunner *BlipTestClientRunner) StartFilteredPullSince(clientID uint32, continuous, since, activeOnly, channels string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, channels, "") } -func (btc *BlipTesterClient) GetVersion(docID string, docVersion DocVersion) ([]byte, bool) { - return btc.SingleCollection().GetVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) GetVersion(clientID uint32, docID string, docVersion DocVersion) ([]byte, bool) { + return 
btcRunner.SingleCollection(clientID).GetVersion(docID, docVersion) } -func (btc *BlipTesterClient) saveAttachment(contentType string, attachmentData string) (int, string, error) { - return btc.SingleCollection().saveAttachment(contentType, attachmentData) +func (btcRunner *BlipTestClientRunner) saveAttachment(clientID uint32, contentType string, attachmentData string) (int, string, error) { + return btcRunner.SingleCollection(clientID).saveAttachment(contentType, attachmentData) } -func (btc *BlipTesterClient) StoreRevOnClient(docID, revID string, body []byte) error { - return btc.SingleCollection().StoreRevOnClient(docID, revID, body) +func (btcRunner *BlipTestClientRunner) StoreRevOnClient(clientID uint32, docID, revID string, body []byte) error { + return btcRunner.SingleCollection(clientID).StoreRevOnClient(docID, revID, body) } -func (btc *BlipTesterClient) PushRevWithHistory(docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { - return btc.SingleCollection().PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) +func (btcRunner *BlipTestClientRunner) PushRevWithHistory(clientID uint32, docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { + return btcRunner.SingleCollection(clientID).PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) } -func (btc *BlipTesterClient) AttachmentsLock() *sync.RWMutex { - return &btc.SingleCollection().attachmentsLock +func (btcRunner *BlipTestClientRunner) AttachmentsLock(clientID uint32) *sync.RWMutex { + return &btcRunner.SingleCollection(clientID).attachmentsLock } func (btc *BlipTesterCollectionClient) AttachmentsLock() *sync.RWMutex { return &btc.attachmentsLock } -func (btc *BlipTesterClient) Attachments() map[string][]byte { - return btc.SingleCollection().attachments +func (btcRunner *BlipTestClientRunner) Attachments(clientID uint32) map[string][]byte { + return btcRunner.SingleCollection(clientID).attachments } func (btc *BlipTesterCollectionClient) Attachments() map[string][]byte { return btc.attachments } -func (btc *BlipTesterClient) UnsubPullChanges() ([]byte, error) { - return btc.SingleCollection().UnsubPullChanges() +func (btcRunner *BlipTestClientRunner) UnsubPullChanges(clientID uint32) ([]byte, error) { + return btcRunner.SingleCollection(clientID).UnsubPullChanges() } func (btc *BlipTesterCollectionClient) addCollectionProperty(msg *blip.Message) { diff --git a/rest/bulk_api.go b/rest/bulk_api.go index a5543c7b07..83358e7c49 100644 --- a/rest/bulk_api.go +++ b/rest/bulk_api.go @@ -264,9 +264,7 @@ func (h *handler) handleDump() error { func (h *handler) handleRepair() error { // TODO: If repair is re-enabled, it may need to be modified to support xattrs and GSI - if true == true { - return errors.New("_repair endpoint disabled") - } + return errors.New("_repair endpoint disabled") /*base.InfofCtx(h.ctx(), base.KeyHTTP, "Repair bucket") @@ -305,8 +303,6 @@ func (h *handler) handleRepair() error { return err */ - - return nil } // HTTP handler for _dumpchannel diff --git a/rest/config.go b/rest/config.go index 41a9671bbd..8e10fc925b 100644 --- a/rest/config.go +++ b/rest/config.go @@ -1467,11 +1467,11 @@ func (sc *ServerContext) migrateV30Configs(ctx context.Context) error { continue } - base.InfofCtx(ctx, base.KeyConfig, "Found legacy persisted config for database %s - migrating to db registry.", base.MD(dbConfig.Name)) + base.InfofCtx(ctx, base.KeyConfig, "Found legacy persisted config for database %s in bucket %s, groupID %s - migrating to db 
registry.", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID)) _, insertErr := sc.BootstrapContext.InsertConfig(ctx, bucketName, groupID, &dbConfig) if insertErr != nil { if insertErr == base.ErrAlreadyExists { - base.DebugfCtx(ctx, base.KeyConfig, "Found legacy config for database %s, but already exists in registry.", base.MD(dbConfig.Name)) + base.DebugfCtx(ctx, base.KeyConfig, "Found legacy config for database %s in bucket %s, groupID: %s, but already exists in registry.", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID)) } else { base.InfofCtx(ctx, base.KeyConfig, "Unable to persist migrated v3.0 config for bucket %s groupID %s: %s", base.MD(bucketName), base.MD(groupID), insertErr) continue @@ -1479,7 +1479,7 @@ func (sc *ServerContext) migrateV30Configs(ctx context.Context) error { } removeErr := sc.BootstrapContext.Connection.DeleteMetadataDocument(ctx, bucketName, PersistentConfigKey30(ctx, groupID), legacyCas) if removeErr != nil { - base.InfofCtx(ctx, base.KeyConfig, "Failed to remove legacy config for database %s: %s", base.MD(dbConfig.Name), removeErr) + base.InfofCtx(ctx, base.KeyConfig, "Failed to remove legacy config for database %s in bucket %s, groupID %s: %s", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID), base.MD(removeErr)) } } return nil @@ -1564,7 +1564,7 @@ func (sc *ServerContext) _fetchDatabase(ctx context.Context, dbName string) (fou cnf.CertPath = sc.Config.Bootstrap.X509CertPath cnf.KeyPath = sc.Config.Bootstrap.X509KeyPath } - base.TracefCtx(ctx, base.KeyConfig, "Got config for bucket %q with cas %d", bucket, cas) + base.TracefCtx(ctx, base.KeyConfig, "Got database config %s for bucket %q with cas %d and groupID %q", base.MD(dbName), base.MD(bucket), cas, base.MD(sc.Config.Bootstrap.ConfigGroupID)) return true, nil } @@ -1686,20 +1686,20 @@ func (sc *ServerContext) FetchConfigs(ctx context.Context, isInitialStartup bool fetchedConfigs := make(map[string]DatabaseConfig, len(buckets)) for _, bucket := range buckets { - base.TracefCtx(ctx, base.KeyConfig, "Checking for configs for group %q from bucket %q", sc.Config.Bootstrap.ConfigGroupID, bucket) + base.TracefCtx(ctx, base.KeyConfig, "Checking for configs for group %q from bucket %q", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket)) configs, err := sc.BootstrapContext.GetDatabaseConfigs(ctx, bucket, sc.Config.Bootstrap.ConfigGroupID) if err != nil { // Unexpected error fetching config - SDK has already performed retries, so we'll treat it as a registry removal // this could be due to invalid JSON or some other non-recoverable error. 
if isInitialStartup { - base.WarnfCtx(ctx, "Unable to fetch config for group %q from bucket %q on startup: %v", sc.Config.Bootstrap.ConfigGroupID, bucket, err) + base.WarnfCtx(ctx, "Unable to fetch configs for group %q from bucket %q on startup: %v", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket), err) } else { - base.DebugfCtx(ctx, base.KeyConfig, "Unable to fetch config for group %q from bucket %q: %v", sc.Config.Bootstrap.ConfigGroupID, bucket, err) + base.DebugfCtx(ctx, base.KeyConfig, "Unable to fetch configs for group %q from bucket %q: %v", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket), err) } continue } if len(configs) == 0 { - base.DebugfCtx(ctx, base.KeyConfig, "Bucket %q did not contain config for group %q", bucket, sc.Config.Bootstrap.ConfigGroupID) + base.DebugfCtx(ctx, base.KeyConfig, "Bucket %q did not contain any configs for group %q", base.MD(bucket), sc.Config.Bootstrap.ConfigGroupID) continue } for _, cnf := range configs { @@ -1776,7 +1776,7 @@ func (sc *ServerContext) _applyConfig(nonContextStruct base.NonCancellableContex configSGVersionStr = cnf.SGVersion } - configSGVersion, err := base.NewComparableVersionFromString(configSGVersionStr) + configSGVersion, err := base.NewComparableBuildVersionFromString(configSGVersionStr) if err != nil { return false, err } diff --git a/rest/config_database.go b/rest/config_database.go index 6a97f1fbcd..cf2820a2dc 100644 --- a/rest/config_database.go +++ b/rest/config_database.go @@ -32,7 +32,7 @@ type DatabaseConfig struct { // Version is a generated Rev ID used for optimistic concurrency control using ETags/If-Match headers. Version string `json:"version,omitempty"` - // SGVersion is a base.ComparableVersion of the Sync Gateway node that wrote the config. + // SGVersion is a base.ComparableBuildVersion of the Sync Gateway node that wrote the config. SGVersion string `json:"sg_version,omitempty"` // MetadataID is the prefix used to store database metadata diff --git a/rest/config_manager.go b/rest/config_manager.go index 1cab53d351..5e3cec8279 100644 --- a/rest/config_manager.go +++ b/rest/config_manager.go @@ -30,10 +30,10 @@ type ConfigManager interface { DeleteConfig(ctx context.Context, bucket, dbName, groupID string) (err error) // CheckMinorDowngrade returns an error the sgVersion represents at least minor version downgrade from the version in the bucket. - CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error + CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error // SetSGVersion updates the Sync Gateway version in the bucket registry - SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error + SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error } type dbConfigNameOnly struct { @@ -590,7 +590,7 @@ func (b *bootstrapContext) getGatewayRegistry(ctx context.Context, bucketName st if registry.SGVersion.String() == "" { // 3.1.0 and 3.1.1 don't write a SGVersion, but everything else will configSGVersionStr := "3.1.0" - v, err := base.NewComparableVersionFromString(configSGVersionStr) + v, err := base.NewComparableBuildVersionFromString(configSGVersionStr) if err != nil { return nil, err } @@ -785,7 +785,7 @@ func (b *bootstrapContext) standardMetadataID(dbName string) string { } // CheckMinorDowngrade returns an error the sgVersion represents at least minor version downgrade from the version in the bucket. 
-func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error { +func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error { registry, err := b.getGatewayRegistry(ctx, bucketName) if err != nil { return err @@ -800,7 +800,7 @@ func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName s } // SetSGVersion will update the registry in a bucket with a version of Sync Gateway. This will not perform a write if the version is already up to date. -func (b *bootstrapContext) SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error { +func (b *bootstrapContext) SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error { registry, err := b.getGatewayRegistry(ctx, bucketName) if err != nil { return err diff --git a/rest/config_manager_test.go b/rest/config_manager_test.go index 57b2722546..ad99028da4 100644 --- a/rest/config_manager_test.go +++ b/rest/config_manager_test.go @@ -226,7 +226,7 @@ func TestVersionDowngrade(t *testing.T) { } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - syncGatewayVersion, err := base.NewComparableVersionFromString(test.syncGatewayVersion) + syncGatewayVersion, err := base.NewComparableBuildVersionFromString(test.syncGatewayVersion) require.NoError(t, err) rt := NewRestTester(t, &RestTesterConfig{ PersistentConfig: true, @@ -240,7 +240,7 @@ func TestVersionDowngrade(t *testing.T) { require.NoError(t, err) require.True(t, syncGatewayVersion.Equal(®istry.SGVersion), "%+v != %+v", syncGatewayVersion, registry.SGVersion) - metadataConfigVersion, err := base.NewComparableVersionFromString(test.metadataConfigVersion) + metadataConfigVersion, err := base.NewComparableBuildVersionFromString(test.metadataConfigVersion) registry.SGVersion = *metadataConfigVersion require.NoError(t, err) require.NoError(t, bootstrapContext.setGatewayRegistry(rt.Context(), rt.Bucket().GetName(), registry)) @@ -258,7 +258,7 @@ func TestVersionDowngrade(t *testing.T) { registry, err = bootstrapContext.getGatewayRegistry(rt.Context(), rt.Bucket().GetName()) require.NoError(t, err) - expectedRegistryVersion, err := base.NewComparableVersionFromString(test.expectedRegistryVersion) + expectedRegistryVersion, err := base.NewComparableBuildVersionFromString(test.expectedRegistryVersion) require.NoError(t, err) require.True(t, expectedRegistryVersion.Equal(®istry.SGVersion), "%+v != %+v", expectedRegistryVersion, registry.SGVersion) diff --git a/rest/config_registry.go b/rest/config_registry.go index 5ae31d674a..3ce843894b 100644 --- a/rest/config_registry.go +++ b/rest/config_registry.go @@ -46,7 +46,7 @@ type GatewayRegistry struct { cas uint64 Version string `json:"version"` // Registry version ConfigGroups map[string]*RegistryConfigGroup `json:"config_groups"` // Map of config groups, keyed by config group ID - SGVersion base.ComparableVersion `json:"sg_version"` // Latest patch version of Sync Gateway that touched the registry + SGVersion base.ComparableBuildVersion `json:"sg_version"` // Latest patch version of Sync Gateway that touched the registry } const GatewayRegistryVersion = "1.0" @@ -84,7 +84,7 @@ type RegistryScope struct { var defaultOnlyRegistryScopes = map[string]RegistryScope{base.DefaultScope: {Collections: []string{base.DefaultCollection}}} var DefaultOnlyScopesConfig = ScopesConfig{base.DefaultScope: {Collections: 
map[string]*CollectionConfig{base.DefaultCollection: {}}}} -func NewGatewayRegistry(syncGatewayVersion base.ComparableVersion) *GatewayRegistry { +func NewGatewayRegistry(syncGatewayVersion base.ComparableBuildVersion) *GatewayRegistry { return &GatewayRegistry{ ConfigGroups: make(map[string]*RegistryConfigGroup), Version: GatewayRegistryVersion, diff --git a/rest/config_test.go b/rest/config_test.go index 1a11dc2f29..9302e2d966 100644 --- a/rest/config_test.go +++ b/rest/config_test.go @@ -1431,8 +1431,8 @@ func TestSetupServerContext(t *testing.T) { config.Bootstrap.Password = base.TestClusterPassword() ctx := base.TestCtx(t) sc, err := SetupServerContext(ctx, &config, false) - defer sc.Close(ctx) require.NoError(t, err) + defer sc.Close(ctx) require.NotNil(t, sc) }) } diff --git a/rest/importtest/collections_import_test.go b/rest/importtest/collections_import_test.go index 79e670efb2..b872f9f4c9 100644 --- a/rest/importtest/collections_import_test.go +++ b/rest/importtest/collections_import_test.go @@ -28,7 +28,7 @@ func TestMultiCollectionImportFilter(t *testing.T) { base.RequireNumTestDataStores(t, 3) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) scopesConfig := rest.GetCollectionsConfig(t, testBucket, 2) @@ -250,7 +250,7 @@ func TestMultiCollectionImportDynamicAddCollection(t *testing.T) { base.RequireNumTestDataStores(t, 2) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) rtConfig := &rest.RestTesterConfig{ @@ -346,7 +346,7 @@ func TestMultiCollectionImportRemoveCollection(t *testing.T) { base.RequireNumTestDataStores(t, numCollections) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) rtConfig := &rest.RestTesterConfig{ diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go index 0fa9f61b82..3a7d0f3d55 100644 --- a/rest/importtest/import_test.go +++ b/rest/importtest/import_test.go @@ -2735,7 +2735,7 @@ func TestImportRollback(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport, base.KeyDCP) ctx := base.TestCtx(t) - bucket := base.GetPersistentTestBucket(t) + bucket := base.GetTestBucket(t) defer bucket.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ diff --git a/rest/multipart_test.go b/rest/multipart_test.go index 8422e7a1d1..4647308167 100644 --- a/rest/multipart_test.go +++ b/rest/multipart_test.go @@ -16,7 +16,6 @@ import ( "fmt" "io" "log" - "math/rand" "mime/multipart" "net/http" "strconv" @@ -162,8 +161,7 @@ func TestWriteJSONPart(t *testing.T) { // writeJSONPart toggles compression to false if the incoming body is less than 300 bytes, so creating // a body larger than 300 bytes to test writeJSONPart with compression=true and compression=false mockFakeBody := func() db.Body { - bytes := make([]byte, 139) - rand.Read(bytes) + bytes := base.FastRandBytes(t, 139) value := fmt.Sprintf("%x", bytes) return db.Body{"key": "foo", "value": value} } diff --git a/rest/oidc_api_test.go b/rest/oidc_api_test.go index e063c770e8..ff057a1727 100644 --- a/rest/oidc_api_test.go +++ b/rest/oidc_api_test.go @@ -2451,10 +2451,6 @@ func mustMarshalJSON(t testing.TB, val interface{}) []byte { // Checks that we correctly handle the removal of an OIDC provider while it's in use func TestOpenIDConnectProviderRemoval(t *testing.T) { - if base.UnitTestUrlIsWalrus() { - // Requires 
persistent config - t.Skip("This test only works against Couchbase Server") - } const ( providerName = "foo" @@ -2535,7 +2531,13 @@ func TestOpenIDConnectProviderRemoval(t *testing.T) { UserCtx db.Body `json:"userCtx"` } require.NoError(t, base.JSONUnmarshal(res.Body.Bytes(), &sessionResponse)) - require.Nil(t, sessionResponse.UserCtx["channels"]) + // session response only contains non collection channels, and is blank if there is no default collection + if base.TestsUseNamedCollections() { + require.Nil(t, sessionResponse.UserCtx["channels"]) + } else { + require.NotContains(t, sessionResponse.UserCtx["channels"], testChannelName) + + } } // This test verifies the edge case of having two different OIDC providers with different role/channel configurations diff --git a/rest/revocation_test.go b/rest/revocation_test.go index 35359e5f1d..06897b002f 100644 --- a/rest/revocation_test.go +++ b/rest/revocation_test.go @@ -2223,297 +2223,313 @@ func TestReplicatorRevocationsFromZero(t *testing.T) { func TestRevocationMessage(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() - - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() + + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := rt.PutDoc("doc", `{"channels": "A"}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc("doc", `{"channels": "A"}`) - // Start pull - err = btc.StartOneshotPull() - assert.NoError(t, err) - - // Wait for doc revision to come over - _, ok := btc.WaitForBlipRevMessage("doc", version) - require.True(t, ok) + require.NoError(t, rt.WaitForPendingChanges()) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Start pull + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - const doc1ID = "doc1" - version = rt.PutDoc(doc1ID, `{"channels": "!"}`) + // Wait for doc revision to come over + _, ok := btcRunner.WaitForBlipRevMessage(btc.id, "doc", version) + require.True(t, ok) - revocationTester.fillToSeq(10) - version = rt.UpdateDoc(doc1ID, version, "{}") + // Remove role from user + revocationTester.removeRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) + const doc1ID = "doc1" + version = rt.PutDoc(doc1ID, `{"channels": "!"}`) - // Start a pull since 5 to receive revocation and removal - err = btc.StartPullSince("false", "5", "false") - assert.NoError(t, err) + revocationTester.fillToSeq(10) + version = rt.UpdateDoc(doc1ID, version, "{}") - // Wait for doc1 rev2 - This is the last rev we expect so 
we can be sure replication is complete here - _, found := btc.WaitForVersion(doc1ID, version) - require.True(t, found) - - messages := btc.pullReplication.GetMessages() - - testCases := []struct { - Name string - DocID string - ExpectedDeleted int64 - }{ - { - Name: "Revocation", - DocID: "doc", - ExpectedDeleted: int64(2), - }, - { - Name: "Removed", - DocID: "doc1", - ExpectedDeleted: int64(4), - }, - } + require.NoError(t, rt.WaitForPendingChanges()) - for _, testCase := range testCases { - t.Run(testCase.Name, func(t *testing.T) { - // Verify the deleted property in the changes message is "2" this indicated a revocation - for _, msg := range messages { - if msg.Properties[db.BlipProfile] == db.MessageChanges { - var changesMessages [][]interface{} - err = msg.ReadJSONBody(&changesMessages) - if err != nil { - continue - } + // Start a pull since 5 to receive revocation and removal + err = btcRunner.StartPullSince(btc.id, "false", "5", "false") + assert.NoError(t, err) - if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { - continue - } + // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, version) + require.True(t, found) + + messages := btc.pullReplication.GetMessages() + + testCases := []struct { + Name string + DocID string + ExpectedDeleted int64 + }{ + { + Name: "Revocation", + DocID: "doc", + ExpectedDeleted: int64(2), + }, + { + Name: "Removed", + DocID: "doc1", + ExpectedDeleted: int64(4), + }, + } - criteriaMet := false - for _, changesMessage := range changesMessages { - castedNum, ok := changesMessage[3].(json.Number) - if !ok { + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + // Verify the deleted property in the changes message is "2" this indicated a revocation + for _, msg := range messages { + if msg.Properties[db.BlipProfile] == db.MessageChanges { + var changesMessages [][]interface{} + err = msg.ReadJSONBody(&changesMessages) + if err != nil { continue } - intDeleted, err := castedNum.Int64() - if err != nil { + + if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { continue } - if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { - criteriaMet = true - break + + criteriaMet := false + for _, changesMessage := range changesMessages { + castedNum, ok := changesMessage[3].(json.Number) + if !ok { + continue + } + intDeleted, err := castedNum.Int64() + if err != nil { + continue + } + if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { + criteriaMet = true + break + } } - } - assert.True(t, criteriaMet) + assert.True(t, criteriaMet) + } } - } - }) - } + }) + } - assert.NoError(t, err) + assert.NoError(t, err) + }) } func TestRevocationNoRev(t *testing.T) { defer db.SuspendSequenceBatching()() - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc" + const waitMarkerID = "docmarker" - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() - // Add channel to role and role to user - 
revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc(docID, `{"channels": "A"}`) - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - // Remove role from user - revocationTester.removeRole("user", "foo") + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + require.True(t, ok) - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + // Remove role from user + revocationTester.removeRole("user", "foo") - const waitMarkerID = "docmarker" - waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - messages := btc.pullReplication.GetMessages() + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - require.NoError(t, err) - require.Len(t, messageBody, 2) - require.Len(t, messageBody[0], 4) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + 
require.NoError(t, err) + require.Len(t, messageBody, 2) + require.Len(t, messageBody[0], 4) - deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - require.NoError(t, err) + deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + require.NoError(t, err) - assert.Equal(t, deletedFlag, int64(2)) + assert.Equal(t, deletedFlag, int64(2)) + }) } func TestRevocationGetSyncDataError(t *testing.T) { defer db.SuspendSequenceBatching()() var throw bool base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - // Two callbacks to cover usage with CBS/Xattrs and without - revocationTester, rt := InitScenario( - t, &RestTesterConfig{ - leakyBucketConfig: &base.LeakyBucketConfig{ - GetWithXattrCallback: func(key string) error { - return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") - }, GetRawCallback: func(key string) error { - if throw { - return fmt.Errorf("Leaky Bucket GetRawCallback Error") - } - return nil + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc" + const waitMarkerID = "docmarker" + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + // Two callbacks to cover usage with CBS/Xattrs and without + revocationTester, rt := InitScenario( + t, &RestTesterConfig{ + leakyBucketConfig: &base.LeakyBucketConfig{ + GetWithXattrCallback: func(key string) error { + return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") + }, GetRawCallback: func(key string) error { + if throw { + return fmt.Errorf("Leaky Bucket GetRawCallback Error") + } + return nil + }, }, }, - }, - ) + ) - defer rt.Close() + defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}}`) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc(docID, `{"channels": "A"}}`) - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) - throw = true - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + throw = true + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + require.True(t, ok) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Remove role from user + revocationTester.removeRole("user", "foo") - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - const waitMarkerID = "docmarker" - 
waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) + }) } // Regression test for CBG-2183. func TestBlipRevokeNonExistentRole(t *testing.T) { - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: false, - }) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) - res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) - RequireStatus(t, res, http.StatusCreated) - - // Create a doc so we have something to replicate - res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) - RequireStatus(t, res, http.StatusCreated) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: false, + }) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() + + // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) + res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) + RequireStatus(t, res, http.StatusCreated) + + // Create a doc so we have something to replicate + res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) + RequireStatus(t, res, http.StatusCreated) + + // 3. Update the user to not reference one of the roles (update to ['a1'], for example) + // [also revoke channel c1 so the doc shows up in the revocation queries] + res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) + RequireStatus(t, res, http.StatusOK) + + // 4. Try to sync + bt := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bilbo", + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer bt.Close() - // 3. Update the user to not reference one of the roles (update to ['a1'], for example) - // [also revoke channel c1 so the doc shows up in the revocation queries] - res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) - RequireStatus(t, res, http.StatusOK) + require.NoError(t, btcRunner.StartPull(bt.id)) - // 4. 
Try to sync
-	bt, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{
-		Username:        "bilbo",
-		SendRevocations: true,
+		// in the failing case we'll panic before hitting this
+		base.RequireWaitForStat(t, func() int64 {
+			return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value()
+		}, 1)
 	})
-	require.NoError(t, err)
-	defer bt.Close()
-
-	require.NoError(t, bt.StartPull())
-
-	// in the failing case we'll panic before hitting this
-	base.RequireWaitForStat(t, func() int64 {
-		return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value()
-	}, 1)
 }
 
 func TestReplicatorSwitchPurgeNoReset(t *testing.T) {
diff --git a/rest/server_context.go b/rest/server_context.go
index 96afe48d28..a3a0c9321a 100644
--- a/rest/server_context.go
+++ b/rest/server_context.go
@@ -89,10 +89,10 @@ const defaultConfigRetryTimeout = 3 * base.DefaultGocbV2OperationTimeout
 
 type bootstrapContext struct {
 	Connection         base.BootstrapConnection
-	configRetryTimeout time.Duration          // configRetryTimeout defines the total amount of time to retry on a registry/config mismatch
-	terminator         chan struct{}          // Used to stop the goroutine handling the bootstrap polling
-	doneChan           chan struct{}          // doneChan is closed when the bootstrap polling goroutine finishes.
-	sgVersion          base.ComparableVersion // version of Sync Gateway
+	configRetryTimeout time.Duration               // configRetryTimeout defines the total amount of time to retry on a registry/config mismatch
+	terminator         chan struct{}               // Used to stop the goroutine handling the bootstrap polling
+	doneChan           chan struct{}               // doneChan is closed when the bootstrap polling goroutine finishes.
+	sgVersion          base.ComparableBuildVersion // version of Sync Gateway
 }
 
 type getOrAddDatabaseConfigOptions struct {
@@ -2060,7 +2060,7 @@ func (sc *ServerContext) initializeCouchbaseServerConnections(ctx context.Contex
 	}
 
 	if count > 0 {
-		base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs from buckets in cluster", count)
+		base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs for group %q from buckets in cluster", count, sc.Config.Bootstrap.ConfigGroupID)
 	} else {
 		base.WarnfCtx(ctx, "Config: No database configs for group %q. Continuing startup to allow REST API database creation", sc.Config.Bootstrap.ConfigGroupID)
 	}
@@ -2083,10 +2083,10 @@ func (sc *ServerContext) initializeCouchbaseServerConnections(ctx context.Contex
 			base.DebugfCtx(ctx, base.KeyConfig, "Fetching configs from buckets in cluster for group %q", sc.Config.Bootstrap.ConfigGroupID)
 			count, err := sc.fetchAndLoadConfigs(ctx, false)
 			if err != nil {
-				base.WarnfCtx(ctx, "Couldn't load configs from bucket when polled: %v", err)
+				base.WarnfCtx(ctx, "Couldn't load configs from bucket for group %q when polled: %v", sc.Config.Bootstrap.ConfigGroupID, err)
 			}
 			if count > 0 {
-				base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs from buckets in cluster", count)
+				base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs for group %q from buckets in cluster", count, sc.Config.Bootstrap.ConfigGroupID)
 			}
 		}
 	}
diff --git a/rest/server_context_test.go b/rest/server_context_test.go
index 793f328f6e..9afbc89789 100644
--- a/rest/server_context_test.go
+++ b/rest/server_context_test.go
@@ -840,11 +840,7 @@ func TestOfflineDatabaseStartup(t *testing.T) {
 
 	base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll)
 
-	ctx := base.TestCtx(t)
-	bucket := base.GetPersistentTestBucket(t)
-	defer bucket.Close(ctx)
 	rt := NewRestTester(t, &RestTesterConfig{
-		CustomTestBucket: bucket.NoCloseClone(),
 		DatabaseConfig: &DatabaseConfig{
 			DbConfig: DbConfig{
 				StartOffline: base.BoolPtr(true),
diff --git a/rest/sync_fn_test.go b/rest/sync_fn_test.go
index f85c23a5b1..e294b6e324 100644
--- a/rest/sync_fn_test.go
+++ b/rest/sync_fn_test.go
@@ -898,20 +898,9 @@ func TestResyncRegenerateSequences(t *testing.T) {
 
 	base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll)
 
-	var testBucket *base.TestBucket
-
-	if base.UnitTestUrlIsWalrus() {
-		var closeFn func()
-		testBucket, closeFn = base.GetPersistentWalrusBucket(t)
-		defer closeFn()
-	} else {
-		testBucket = base.GetTestBucket(t)
-	}
-
 	rt := NewRestTester(t,
 		&RestTesterConfig{
-			SyncFn:           syncFn,
-			CustomTestBucket: testBucket,
+			SyncFn: syncFn,
 		},
 	)
 	defer rt.Close()
diff --git a/rest/upgradetest/remove_collection_test.go b/rest/upgradetest/remove_collection_test.go
index 23141b8d7e..4072586208 100644
--- a/rest/upgradetest/remove_collection_test.go
+++ b/rest/upgradetest/remove_collection_test.go
@@ -27,7 +27,7 @@ func TestRemoveCollection(t *testing.T) {
 	base.TestRequiresCollections(t)
 	base.RequireNumTestBuckets(t, 2)
 	numCollections := 2
-	bucket := base.GetPersistentTestBucket(t)
+	bucket := base.GetTestBucket(t)
 	defer bucket.Close(base.TestCtx(t))
 	base.RequireNumTestDataStores(t, numCollections)
 	rtConfig := &rest.RestTesterConfig{
diff --git a/rest/upgradetest/upgrade_registry_test.go b/rest/upgradetest/upgrade_registry_test.go
index 880e3caad8..f9142fb5ed 100644
--- a/rest/upgradetest/upgrade_registry_test.go
+++ b/rest/upgradetest/upgrade_registry_test.go
@@ -189,7 +189,7 @@ func getDbConfigFromLegacyConfig(rt *rest.RestTester) string {
 }
 
 func TestLegacyMetadataID(t *testing.T) {
-	tb1 := base.GetPersistentTestBucket(t)
+	tb1 := base.GetTestBucket(t)
 	// Create a non-persistent rest tester. Standard RestTester
 	// creates a database 'db' targeting the default collection (when !TestUseNamedCollections)
 	legacyRT := rest.NewRestTesterDefaultCollection(t, &rest.RestTesterConfig{
@@ -254,7 +254,7 @@ func TestMetadataIDRenameDatabase(t *testing.T) {
 
 // Verifies that matching metadataIDs are computed if two config groups for the same database are upgraded
 func TestMetadataIDWithConfigGroups(t *testing.T) {
-	tb1 := base.GetPersistentTestBucket(t)
+	tb1 := base.GetTestBucket(t)
 	// Create a non-persistent rest tester. Standard RestTester
 	// creates a database 'db' targeting the default collection for legacy config.
 	legacyRT := rest.NewRestTesterDefaultCollection(t, &rest.RestTesterConfig{
diff --git a/rest/user_api_test.go b/rest/user_api_test.go
index fc97dbbcdf..9f7c5544fd 100644
--- a/rest/user_api_test.go
+++ b/rest/user_api_test.go
@@ -1530,7 +1530,7 @@ func TestGetUserCollectionAccess(t *testing.T) {
 	base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll)
 	ctx := base.TestCtx(t)
 
-	testBucket := base.GetPersistentTestBucket(t)
+	testBucket := base.GetTestBucket(t)
 	defer testBucket.Close(ctx)
 	scopesConfig := GetCollectionsConfig(t, testBucket, 2)
 
@@ -1616,7 +1616,7 @@ func TestGetUserCollectionAccess(t *testing.T) {
 func TestPutUserCollectionAccess(t *testing.T) {
 	base.RequireNumTestDataStores(t, 2)
 	base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll)
-	testBucket := base.GetPersistentTestBucket(t)
+	testBucket := base.GetTestBucket(t)
 	scopesConfig := GetCollectionsConfig(t, testBucket, 2)
 
 	rtConfig := &RestTesterConfig{
diff --git a/rest/utilities_testing.go b/rest/utilities_testing.go
index 380584dba1..078284cca8 100644
--- a/rest/utilities_testing.go
+++ b/rest/utilities_testing.go
@@ -71,7 +71,7 @@ type RestTesterConfig struct {
 	serverless           bool // Runs SG in serverless mode. Must be used in conjunction with persistent config
 	collectionConfig     collectionConfiguration
 	numCollections       int
-	syncGatewayVersion   *base.ComparableVersion // alternate version of Sync Gateway to use on startup
+	syncGatewayVersion   *base.ComparableBuildVersion // alternate version of Sync Gateway to use on startup
 	allowDbConfigEnvVars *bool
 }
 
@@ -166,11 +166,7 @@ func (rt *RestTester) Bucket() base.Bucket {
 	// If we have a TestBucket defined on the RestTesterConfig, use that instead of requesting a new one.
 	testBucket := rt.RestTesterConfig.CustomTestBucket
 	if testBucket == nil {
-		if rt.PersistentConfig {
-			testBucket = base.GetPersistentTestBucket(rt.TB)
-		} else {
-			testBucket = base.GetTestBucket(rt.TB)
-		}
+		testBucket = base.GetTestBucket(rt.TB)
 		if rt.leakyBucketConfig != nil {
 			leakyConfig := *rt.leakyBucketConfig
 			// Ignore closures to avoid double closing panics
diff --git a/rest/utilities_testing_test.go b/rest/utilities_testing_test.go
index f17c49192d..b41d471df6 100644
--- a/rest/utilities_testing_test.go
+++ b/rest/utilities_testing_test.go
@@ -261,7 +261,7 @@ func TestRestTesterTemplateMultipleDatabases(t *testing.T) {
 	}
 	base.RequireNumTestBuckets(t, 2)
 	ctx := base.TestCtx(t)
-	bucket2 := base.GetPersistentTestBucket(t)
+	bucket2 := base.GetTestBucket(t)
 	defer bucket2.Close(ctx)
 	dbConfig = DbConfig{
 		Scopes: GetCollectionsConfig(rt.TB, bucket2, numCollections),