From e8f9645327f25826dcc0bd2b1ba12d169740fc68 Mon Sep 17 00:00:00 2001 From: Ben Brooks Date: Mon, 6 Nov 2023 17:12:51 +0000 Subject: [PATCH 01/14] Refactor: Rename ComparableVersion to ComparableBuildVersion (#6566) --- base/dcp_sharded.go | 12 +-- base/version.go | 4 +- ...parable.go => version_comparable_build.go} | 100 +++++++++--------- ...st.go => version_comparable_build_test.go} | 46 ++++---- rest/config.go | 2 +- rest/config_database.go | 2 +- rest/config_manager.go | 10 +- rest/config_manager_test.go | 6 +- rest/config_registry.go | 4 +- rest/server_context.go | 8 +- rest/utilities_testing.go | 2 +- 11 files changed, 98 insertions(+), 98 deletions(-) rename base/{version_comparable.go => version_comparable_build.go} (63%) rename base/{version_comparable_test.go => version_comparable_build_test.go} (79%) diff --git a/base/dcp_sharded.go b/base/dcp_sharded.go index 69acebd035..97f2e6952a 100644 --- a/base/dcp_sharded.go +++ b/base/dcp_sharded.go @@ -28,7 +28,7 @@ const DefaultImportPartitions = 16 const DefaultImportPartitionsServerless = 6 // firstVersionToSupportCollections represents the earliest Sync Gateway release that supports collections. -var firstVersionToSupportCollections = &ComparableVersion{ +var firstVersionToSupportCollections = &ComparableBuildVersion{ epoch: 0, major: 3, minor: 1, @@ -38,7 +38,7 @@ var firstVersionToSupportCollections = &ComparableVersion{ // nodeExtras is the contents of the JSON value of the cbgt.NodeDef.Extras field as used by Sync Gateway. type nodeExtras struct { // Version is the node's version. - Version *ComparableVersion `json:"v"` + Version *ComparableBuildVersion `json:"v"` } // CbgtContext holds the two handles we have for CBGT-related functionality. @@ -376,7 +376,7 @@ func (c *CbgtContext) StartManager(ctx context.Context, dbName string, configGro // getNodeVersion returns the version of the node from its Extras field, or nil if none is stored. Returns an error if // the extras could not be parsed. 
-func getNodeVersion(def *cbgt.NodeDef) (*ComparableVersion, error) { +func getNodeVersion(def *cbgt.NodeDef) (*ComparableBuildVersion, error) { if len(def.Extras) == 0 { return nil, nil } @@ -388,7 +388,7 @@ func getNodeVersion(def *cbgt.NodeDef) (*ComparableVersion, error) { } // getMinNodeVersion returns the version of the oldest node currently in the cluster. -func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableVersion, error) { +func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableBuildVersion, error) { nodes, _, err := cbgt.CfgGetNodeDefs(cfg, cbgt.NODE_DEFS_KNOWN) if err != nil { return nil, err @@ -397,14 +397,14 @@ func getMinNodeVersion(cfg cbgt.Cfg) (*ComparableVersion, error) { // If there are no nodes at all, it's likely we're the first node in the cluster. return ProductVersion, nil } - var minVersion *ComparableVersion + var minVersion *ComparableBuildVersion for _, node := range nodes.NodeDefs { nodeVersion, err := getNodeVersion(node) if err != nil { return nil, fmt.Errorf("failed to get version of node %v: %w", MD(node.HostPort).Redact(), err) } if nodeVersion == nil { - nodeVersion = zeroComparableVersion() + nodeVersion = zeroComparableBuildVersion() } if minVersion == nil || nodeVersion.Less(minVersion) { minVersion = nodeVersion diff --git a/base/version.go b/base/version.go index aff28382c2..531d30f92d 100644 --- a/base/version.go +++ b/base/version.go @@ -27,7 +27,7 @@ const ( // populated via init() below var ( // ProductVersion describes the specific version information of the build. - ProductVersion *ComparableVersion + ProductVersion *ComparableBuildVersion // VersionString appears in the "Server:" header of HTTP responses. // CBL 1.x parses the header to determine whether it's talking to Sync Gateway (vs. CouchDB) and what version. 
@@ -109,7 +109,7 @@ func init() { editionStr = productEditionShortName var err error - ProductVersion, err = NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) + ProductVersion, err = NewComparableBuildVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) if err != nil { panic(err) } diff --git a/base/version_comparable.go b/base/version_comparable_build.go similarity index 63% rename from base/version_comparable.go rename to base/version_comparable_build.go index 27f212382d..541761d52d 100644 --- a/base/version_comparable.go +++ b/base/version_comparable_build.go @@ -15,22 +15,22 @@ import ( ) const ( - // comparableVersionEpoch can be incremented when the versioning system or string format changes, whilst maintaining ordering. + // comparableBuildVersionEpoch can be incremented when the versioning system or string format changes, whilst maintaining ordering. // i.e. It's a version number version // e.g: version system change from semver to dates: 0:30.2.1@45-EE < 1:22-3-25@33-EE - comparableVersionEpoch = 0 + comparableBuildVersionEpoch = 0 ) -// ComparableVersion is an [epoch:]major.minor.patch[.other][@build][-edition] version that has methods to reliably extract information. -type ComparableVersion struct { +// ComparableBuildVersion is an [epoch:]major.minor.patch[.other][@build][-edition] version that has methods to reliably extract information. 
+type ComparableBuildVersion struct { epoch, major, minor, patch, other uint8 build uint16 edition productEdition str string } -func zeroComparableVersion() *ComparableVersion { - v := &ComparableVersion{ +func zeroComparableBuildVersion() *ComparableBuildVersion { + v := &ComparableBuildVersion{ epoch: 0, major: 0, minor: 0, @@ -39,18 +39,18 @@ func zeroComparableVersion() *ComparableVersion { build: 0, edition: "", } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() return v } -// NewComparableVersionFromString parses a ComparableVersion from the given version string. +// NewComparableBuildVersionFromString parses a ComparableBuildVersion from the given version string. // Expected format: `[epoch:]major.minor.patch[.other][@build][-edition]` -func NewComparableVersionFromString(version string) (*ComparableVersion, error) { - epoch, major, minor, patch, other, build, edition, err := parseComparableVersion(version) +func NewComparableBuildVersionFromString(version string) (*ComparableBuildVersion, error) { + epoch, major, minor, patch, other, build, edition, err := parseComparableBuildVersion(version) if err != nil { return nil, err } - v := &ComparableVersion{ + v := &ComparableBuildVersion{ epoch: epoch, major: major, minor: minor, @@ -59,20 +59,20 @@ func NewComparableVersionFromString(version string) (*ComparableVersion, error) build: build, edition: edition, } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() if v.str != version { return nil, fmt.Errorf("version string %q is not equal to formatted version string %q", version, v.str) } return v, nil } -func NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (*ComparableVersion, error) { - _, major, minor, patch, other, build, edition, err := parseComparableVersionComponents("", majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) +func NewComparableBuildVersion(majorStr, minorStr, patchStr, otherStr, 
buildStr, editionStr string) (*ComparableBuildVersion, error) { + _, major, minor, patch, other, build, edition, err := parseComparableBuildVersionComponents("", majorStr, minorStr, patchStr, otherStr, buildStr, editionStr) if err != nil { return nil, err } - v := &ComparableVersion{ - epoch: comparableVersionEpoch, + v := &ComparableBuildVersion{ + epoch: comparableBuildVersionEpoch, major: major, minor: minor, patch: patch, @@ -80,12 +80,12 @@ func NewComparableVersion(majorStr, minorStr, patchStr, otherStr, buildStr, edit build: build, edition: edition, } - v.str = v.formatComparableVersion() + v.str = v.formatComparableBuildVersion() return v, nil } // Equal returns true if pv is equal to b -func (pv *ComparableVersion) Equal(b *ComparableVersion) bool { +func (pv *ComparableBuildVersion) Equal(b *ComparableBuildVersion) bool { return pv.epoch == b.epoch && pv.major == b.major && pv.minor == b.minor && @@ -96,7 +96,7 @@ func (pv *ComparableVersion) Equal(b *ComparableVersion) bool { } // Less returns true if a is less than b -func (a *ComparableVersion) Less(b *ComparableVersion) bool { +func (a *ComparableBuildVersion) Less(b *ComparableBuildVersion) bool { if a.epoch < b.epoch { return true } else if a.epoch > b.epoch { @@ -138,7 +138,7 @@ func (a *ComparableVersion) Less(b *ComparableVersion) bool { } // AtLeastMinorDowngrade returns true there is a major or minor downgrade from a to b. -func (a *ComparableVersion) AtLeastMinorDowngrade(b *ComparableVersion) bool { +func (a *ComparableBuildVersion) AtLeastMinorDowngrade(b *ComparableBuildVersion) bool { if a.epoch != b.epoch { return a.epoch > b.epoch } @@ -148,82 +148,82 @@ func (a *ComparableVersion) AtLeastMinorDowngrade(b *ComparableVersion) bool { return a.minor > b.minor } -func (pv ComparableVersion) String() string { +func (pv ComparableBuildVersion) String() string { return pv.str } -// MarshalJSON implements json.Marshaler for ComparableVersion. The JSON representation is the version string. 
-func (pv *ComparableVersion) MarshalJSON() ([]byte, error) { +// MarshalJSON implements json.Marshaler for ComparableBuildVersion. The JSON representation is the version string. +func (pv *ComparableBuildVersion) MarshalJSON() ([]byte, error) { return JSONMarshal(pv.String()) } -func (pv *ComparableVersion) UnmarshalJSON(val []byte) error { +func (pv *ComparableBuildVersion) UnmarshalJSON(val []byte) error { var strVal string err := JSONUnmarshal(val, &strVal) if err != nil { return err } if strVal != "" { - pv.epoch, pv.major, pv.minor, pv.patch, pv.other, pv.build, pv.edition, err = parseComparableVersion(strVal) + pv.epoch, pv.major, pv.minor, pv.patch, pv.other, pv.build, pv.edition, err = parseComparableBuildVersion(strVal) } - pv.str = pv.formatComparableVersion() + pv.str = pv.formatComparableBuildVersion() return err } const ( - comparableVersionSep = '.' - comparableVersionSepEpoch = ':' - comparableVersionSepBuild = '@' - comparableVersionSepEdition = '-' + comparableBuildVersionSep = '.' + comparableBuildVersionSepEpoch = ':' + comparableBuildVersionSepBuild = '@' + comparableBuildVersionSepEdition = '-' ) -// formatComparableVersion returns the string representation of the given version. +// formatComparableBuildVersion returns the string representation of the given version. 
// format: `[epoch:]major.minor.patch[.other][@build][-edition]` -func (pv *ComparableVersion) formatComparableVersion() string { +func (pv *ComparableBuildVersion) formatComparableBuildVersion() string { if pv == nil { return "0.0.0" } epochStr := "" if pv.epoch > 0 { - epochStr = strconv.FormatUint(uint64(pv.epoch), 10) + string(comparableVersionSepEpoch) + epochStr = strconv.FormatUint(uint64(pv.epoch), 10) + string(comparableBuildVersionSepEpoch) } semverStr := strconv.FormatUint(uint64(pv.major), 10) + - string(comparableVersionSep) + + string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.minor), 10) + - string(comparableVersionSep) + + string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.patch), 10) otherStr := "" if pv.other > 0 { - otherStr = string(comparableVersionSep) + + otherStr = string(comparableBuildVersionSep) + strconv.FormatUint(uint64(pv.other), 10) } buildStr := "" if pv.build > 0 { - buildStr = string(comparableVersionSepBuild) + strconv.FormatUint(uint64(pv.build), 10) + buildStr = string(comparableBuildVersionSepBuild) + strconv.FormatUint(uint64(pv.build), 10) } editionStr := "" if ed := pv.edition.String(); ed != "" { - editionStr = string(comparableVersionSepEdition) + ed + editionStr = string(comparableBuildVersionSepEdition) + ed } return epochStr + semverStr + otherStr + buildStr + editionStr } -func parseComparableVersion(version string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { - epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr, err := extractComparableVersionComponents(version) +func parseComparableBuildVersion(version string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { + epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr, err := extractComparableBuildVersionComponents(version) if err != nil { return 0, 0, 0, 0, 0, 0, "", err } - return 
parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr) + return parseComparableBuildVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, edtionStr) } -func parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { +func parseComparableBuildVersionComponents(epochStr, majorStr, minorStr, patchStr, otherStr, buildStr, editionStr string) (epoch, major, minor, patch, other uint8, build uint16, edition productEdition, err error) { if epochStr != "" { tmp, err := strconv.ParseUint(epochStr, 10, 8) if err != nil { @@ -282,8 +282,8 @@ func parseComparableVersionComponents(epochStr, majorStr, minorStr, patchStr, ot return epoch, major, minor, patch, other, build, edition, nil } -// extractComparableVersionComponents takes a version string and returns each component as a string -func extractComparableVersionComponents(version string) (epoch, major, minor, patch, other, build, edition string, err error) { +// extractComparableBuildVersionComponents takes a version string and returns each component as a string +func extractComparableBuildVersionComponents(version string) (epoch, major, minor, patch, other, build, edition string, err error) { var remainder string @@ -291,18 +291,18 @@ func extractComparableVersionComponents(version string) (epoch, major, minor, pa // and still iterating over the entire string only once, albeit in small chunks. 
// prefixes - epoch, remainder = safeCutBefore(version, string(comparableVersionSepEpoch)) + epoch, remainder = safeCutBefore(version, string(comparableBuildVersionSepEpoch)) // suffixes - edition, remainder = safeCutAfter(remainder, string(comparableVersionSepEdition)) - build, remainder = safeCutAfter(remainder, string(comparableVersionSepBuild)) + edition, remainder = safeCutAfter(remainder, string(comparableBuildVersionSepEdition)) + build, remainder = safeCutAfter(remainder, string(comparableBuildVersionSepBuild)) // major.minor.patch[.other] - major, remainder = safeCutBefore(remainder, string(comparableVersionSep)) - minor, remainder = safeCutBefore(remainder, string(comparableVersionSep)) + major, remainder = safeCutBefore(remainder, string(comparableBuildVersionSep)) + minor, remainder = safeCutBefore(remainder, string(comparableBuildVersionSep)) // handle optional [.other] - if before, after, ok := strings.Cut(remainder, string(comparableVersionSep)); !ok { + if before, after, ok := strings.Cut(remainder, string(comparableBuildVersionSep)); !ok { patch = remainder } else { patch = before diff --git a/base/version_comparable_test.go b/base/version_comparable_build_test.go similarity index 79% rename from base/version_comparable_test.go rename to base/version_comparable_build_test.go index 55f8515625..80ebf6351d 100644 --- a/base/version_comparable_test.go +++ b/base/version_comparable_build_test.go @@ -15,9 +15,9 @@ import ( "github.com/stretchr/testify/require" ) -func TestComparableVersion(t *testing.T) { +func TestComparableBuildVersion(t *testing.T) { // An *ascending* list of valid versions (order is required for comparison testing) - testDataComparableVersions := []struct { + testDataComparableBuildVersions := []struct { str string }{ {"0.0.0"}, // min @@ -48,9 +48,9 @@ func TestComparableVersion(t *testing.T) { {"255:255.255.255.255@65535-EE"}, // max } - for i, test := range testDataComparableVersions { + for i, test := range 
testDataComparableBuildVersions { t.Run(test.str, func(t *testing.T) { - current, err := NewComparableVersionFromString(test.str) + current, err := NewComparableBuildVersionFromString(test.str) require.NoError(t, err) // string->version->string round-trip @@ -58,8 +58,8 @@ func TestComparableVersion(t *testing.T) { // comparisons (Less/Equal) if i > 1 { - prevStr := testDataComparableVersions[i-1].str - previous, err := NewComparableVersionFromString(prevStr) + prevStr := testDataComparableBuildVersions[i-1].str + previous, err := NewComparableBuildVersionFromString(prevStr) require.NoError(t, err) assert.Truef(t, previous.Less(current), "incorrect comparison: expected %q < %q", prevStr, test.str) @@ -70,8 +70,8 @@ func TestComparableVersion(t *testing.T) { } } -func TestInvalidComparableVersion(t *testing.T) { - // A list of invalid ComparableVersion +func TestInvalidComparableBuildVersion(t *testing.T) { + // A list of invalid ComparableBuildVersion tests := []struct { ver string }{ @@ -108,29 +108,29 @@ func TestInvalidComparableVersion(t *testing.T) { for _, test := range tests { t.Run(test.ver, func(t *testing.T) { - ver, err := NewComparableVersionFromString(test.ver) + ver, err := NewComparableBuildVersionFromString(test.ver) assert.Error(t, err) assert.Nil(t, ver) }) } } -func TestComparableVersionJSONRoundTrip(t *testing.T) { +func TestComparableBuildVersionJSONRoundTrip(t *testing.T) { json, err := JSONMarshal(ProductVersion) require.NoError(t, err) - var version ComparableVersion + var version ComparableBuildVersion err = JSONUnmarshal(json, &version) require.NoError(t, err) require.True(t, ProductVersion.Equal(&version)) require.Equal(t, ProductVersion.String(), version.String()) } -func TestComparableVersionEmptyStringJSON(t *testing.T) { - var version ComparableVersion +func TestComparableBuildVersionEmptyStringJSON(t *testing.T) { + var version ComparableBuildVersion err := JSONUnmarshal([]byte(`""`), &version) require.NoError(t, err) - 
require.True(t, zeroComparableVersion().Equal(&version)) - require.Equal(t, "0.0.0", zeroComparableVersion().String()) + require.True(t, zeroComparableBuildVersion().Equal(&version)) + require.Equal(t, "0.0.0", zeroComparableBuildVersion().String()) require.Equal(t, "0.0.0", version.String()) } @@ -224,30 +224,30 @@ func TestAtLeastMinorDowngradeVersion(t *testing.T) { for _, test := range testCases { t.Run(fmt.Sprintf("%s->%s", test.versionA, test.versionB), func(t *testing.T) { - versionA, err := NewComparableVersionFromString(test.versionA) + versionA, err := NewComparableBuildVersionFromString(test.versionA) require.NoError(t, err) - versionB, err := NewComparableVersionFromString(test.versionB) + versionB, err := NewComparableBuildVersionFromString(test.versionB) require.NoError(t, err) require.Equal(t, test.minorDowngrade, versionA.AtLeastMinorDowngrade(versionB)) }) } } -func BenchmarkComparableVersion(b *testing.B) { +func BenchmarkComparableBuildVersion(b *testing.B) { const str = "8:7.6.5.4@3-EE" - current, err := NewComparableVersionFromString(str) + current, err := NewComparableBuildVersionFromString(str) require.NoError(b, err) - b.Run("parseComparableVersion", func(b *testing.B) { + b.Run("parseComparableBuildVersion", func(b *testing.B) { for i := 0; i < b.N; i++ { - _, _, _, _, _, _, _, _ = parseComparableVersion(str) + _, _, _, _, _, _, _, _ = parseComparableBuildVersion(str) } }) - b.Run("formatComparableVersion", func(b *testing.B) { + b.Run("formatComparableBuildVersion", func(b *testing.B) { for i := 0; i < b.N; i++ { - _ = current.formatComparableVersion() + _ = current.formatComparableBuildVersion() } }) } diff --git a/rest/config.go b/rest/config.go index 41a9671bbd..6c2bd87d8c 100644 --- a/rest/config.go +++ b/rest/config.go @@ -1776,7 +1776,7 @@ func (sc *ServerContext) _applyConfig(nonContextStruct base.NonCancellableContex configSGVersionStr = cnf.SGVersion } - configSGVersion, err := 
base.NewComparableVersionFromString(configSGVersionStr) + configSGVersion, err := base.NewComparableBuildVersionFromString(configSGVersionStr) if err != nil { return false, err } diff --git a/rest/config_database.go b/rest/config_database.go index 6a97f1fbcd..cf2820a2dc 100644 --- a/rest/config_database.go +++ b/rest/config_database.go @@ -32,7 +32,7 @@ type DatabaseConfig struct { // Version is a generated Rev ID used for optimistic concurrency control using ETags/If-Match headers. Version string `json:"version,omitempty"` - // SGVersion is a base.ComparableVersion of the Sync Gateway node that wrote the config. + // SGVersion is a base.ComparableBuildVersion of the Sync Gateway node that wrote the config. SGVersion string `json:"sg_version,omitempty"` // MetadataID is the prefix used to store database metadata diff --git a/rest/config_manager.go b/rest/config_manager.go index 1cab53d351..5e3cec8279 100644 --- a/rest/config_manager.go +++ b/rest/config_manager.go @@ -30,10 +30,10 @@ type ConfigManager interface { DeleteConfig(ctx context.Context, bucket, dbName, groupID string) (err error) // CheckMinorDowngrade returns an error the sgVersion represents at least minor version downgrade from the version in the bucket. 
- CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error + CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error // SetSGVersion updates the Sync Gateway version in the bucket registry - SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error + SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error } type dbConfigNameOnly struct { @@ -590,7 +590,7 @@ func (b *bootstrapContext) getGatewayRegistry(ctx context.Context, bucketName st if registry.SGVersion.String() == "" { // 3.1.0 and 3.1.1 don't write a SGVersion, but everything else will configSGVersionStr := "3.1.0" - v, err := base.NewComparableVersionFromString(configSGVersionStr) + v, err := base.NewComparableBuildVersionFromString(configSGVersionStr) if err != nil { return nil, err } @@ -785,7 +785,7 @@ func (b *bootstrapContext) standardMetadataID(dbName string) string { } // CheckMinorDowngrade returns an error the sgVersion represents at least minor version downgrade from the version in the bucket. -func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error { +func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error { registry, err := b.getGatewayRegistry(ctx, bucketName) if err != nil { return err @@ -800,7 +800,7 @@ func (b *bootstrapContext) CheckMinorDowngrade(ctx context.Context, bucketName s } // SetSGVersion will update the registry in a bucket with a version of Sync Gateway. This will not perform a write if the version is already up to date. 
-func (b *bootstrapContext) SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableVersion) error { +func (b *bootstrapContext) SetSGVersion(ctx context.Context, bucketName string, sgVersion base.ComparableBuildVersion) error { registry, err := b.getGatewayRegistry(ctx, bucketName) if err != nil { return err diff --git a/rest/config_manager_test.go b/rest/config_manager_test.go index 57b2722546..ad99028da4 100644 --- a/rest/config_manager_test.go +++ b/rest/config_manager_test.go @@ -226,7 +226,7 @@ func TestVersionDowngrade(t *testing.T) { } for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - syncGatewayVersion, err := base.NewComparableVersionFromString(test.syncGatewayVersion) + syncGatewayVersion, err := base.NewComparableBuildVersionFromString(test.syncGatewayVersion) require.NoError(t, err) rt := NewRestTester(t, &RestTesterConfig{ PersistentConfig: true, @@ -240,7 +240,7 @@ func TestVersionDowngrade(t *testing.T) { require.NoError(t, err) require.True(t, syncGatewayVersion.Equal(®istry.SGVersion), "%+v != %+v", syncGatewayVersion, registry.SGVersion) - metadataConfigVersion, err := base.NewComparableVersionFromString(test.metadataConfigVersion) + metadataConfigVersion, err := base.NewComparableBuildVersionFromString(test.metadataConfigVersion) registry.SGVersion = *metadataConfigVersion require.NoError(t, err) require.NoError(t, bootstrapContext.setGatewayRegistry(rt.Context(), rt.Bucket().GetName(), registry)) @@ -258,7 +258,7 @@ func TestVersionDowngrade(t *testing.T) { registry, err = bootstrapContext.getGatewayRegistry(rt.Context(), rt.Bucket().GetName()) require.NoError(t, err) - expectedRegistryVersion, err := base.NewComparableVersionFromString(test.expectedRegistryVersion) + expectedRegistryVersion, err := base.NewComparableBuildVersionFromString(test.expectedRegistryVersion) require.NoError(t, err) require.True(t, expectedRegistryVersion.Equal(®istry.SGVersion), "%+v != %+v", expectedRegistryVersion, 
registry.SGVersion) diff --git a/rest/config_registry.go b/rest/config_registry.go index 5ae31d674a..3ce843894b 100644 --- a/rest/config_registry.go +++ b/rest/config_registry.go @@ -46,7 +46,7 @@ type GatewayRegistry struct { cas uint64 Version string `json:"version"` // Registry version ConfigGroups map[string]*RegistryConfigGroup `json:"config_groups"` // Map of config groups, keyed by config group ID - SGVersion base.ComparableVersion `json:"sg_version"` // Latest patch version of Sync Gateway that touched the registry + SGVersion base.ComparableBuildVersion `json:"sg_version"` // Latest patch version of Sync Gateway that touched the registry } const GatewayRegistryVersion = "1.0" @@ -84,7 +84,7 @@ type RegistryScope struct { var defaultOnlyRegistryScopes = map[string]RegistryScope{base.DefaultScope: {Collections: []string{base.DefaultCollection}}} var DefaultOnlyScopesConfig = ScopesConfig{base.DefaultScope: {Collections: map[string]*CollectionConfig{base.DefaultCollection: {}}}} -func NewGatewayRegistry(syncGatewayVersion base.ComparableVersion) *GatewayRegistry { +func NewGatewayRegistry(syncGatewayVersion base.ComparableBuildVersion) *GatewayRegistry { return &GatewayRegistry{ ConfigGroups: make(map[string]*RegistryConfigGroup), Version: GatewayRegistryVersion, diff --git a/rest/server_context.go b/rest/server_context.go index 96afe48d28..09f0902b9f 100644 --- a/rest/server_context.go +++ b/rest/server_context.go @@ -89,10 +89,10 @@ const defaultConfigRetryTimeout = 3 * base.DefaultGocbV2OperationTimeout type bootstrapContext struct { Connection base.BootstrapConnection - configRetryTimeout time.Duration // configRetryTimeout defines the total amount of time to retry on a registry/config mismatch - terminator chan struct{} // Used to stop the goroutine handling the bootstrap polling - doneChan chan struct{} // doneChan is closed when the bootstrap polling goroutine finishes. 
- sgVersion base.ComparableVersion // version of Sync Gateway + configRetryTimeout time.Duration // configRetryTimeout defines the total amount of time to retry on a registry/config mismatch + terminator chan struct{} // Used to stop the goroutine handling the bootstrap polling + doneChan chan struct{} // doneChan is closed when the bootstrap polling goroutine finishes. + sgVersion base.ComparableBuildVersion // version of Sync Gateway } type getOrAddDatabaseConfigOptions struct { diff --git a/rest/utilities_testing.go b/rest/utilities_testing.go index 380584dba1..5b577a7ec4 100644 --- a/rest/utilities_testing.go +++ b/rest/utilities_testing.go @@ -71,7 +71,7 @@ type RestTesterConfig struct { serverless bool // Runs SG in serverless mode. Must be used in conjunction with persistent config collectionConfig collectionConfiguration numCollections int - syncGatewayVersion *base.ComparableVersion // alternate version of Sync Gateway to use on startup + syncGatewayVersion *base.ComparableBuildVersion // alternate version of Sync Gateway to use on startup allowDbConfigEnvVars *bool } From a9033a30cd729807fe703df0feb0317f9be23f88 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Mon, 6 Nov 2023 12:13:21 -0500 Subject: [PATCH 02/14] CBG-2979 remove unused fields and docs for GET /db/ (#6565) --- docs/api/paths/admin/db-.yaml | 24 ----------------- rest/api.go | 50 ++++++++++------------------------- 2 files changed, 14 insertions(+), 60 deletions(-) diff --git a/docs/api/paths/admin/db-.yaml b/docs/api/paths/admin/db-.yaml index d74cada125..8971c23b7b 100644 --- a/docs/api/paths/admin/db-.yaml +++ b/docs/api/paths/admin/db-.yaml @@ -66,30 +66,6 @@ get: description: Unique server identifier. type: string example: 995618a6a6cc9ac79731bd13240e19b5 - scopes: - description: 'Scopes that are used by the database.' 
- type: object - example: - scope1: - collections: - collection1: - update_seq: 123456 - collection2: - update_seq: 654321 - additionalProperties: - description: 'The name of the scope.' - type: object - properties: - collections: - description: 'The set of collections within the scope.' - additionalProperties: - description: 'The name of the collection.' - type: object - properties: - update_seq: - description: 'The last sequence number that was committed to the collection.' - type: integer - example: 123456 '404': $ref: ../../components/responses.yaml#/Not-found tags: diff --git a/rest/api.go b/rest/api.go index 355970687d..f013820a01 100644 --- a/rest/api.go +++ b/rest/api.go @@ -394,17 +394,16 @@ func (h *handler) instanceStartTimeMicro() int64 { } type DatabaseRoot struct { - DBName string `json:"db_name"` - SequenceNumber *uint64 `json:"update_seq,omitempty"` // The last sequence written to the _default collection, if not running with multiple collections. - CommittedUpdateSequenceNumber *uint64 `json:"committed_update_seq,omitempty"` // Same as above - Used by perf tests, shouldn't be removed - InstanceStartTimeMicro int64 `json:"instance_start_time"` // microseconds since epoch - CompactRunning bool `json:"compact_running"` - PurgeSequenceNumber uint64 `json:"purge_seq"` - DiskFormatVersion uint64 `json:"disk_format_version"` - State string `json:"state"` - ServerUUID string `json:"server_uuid,omitempty"` - RequireResync []string `json:"require_resync,omitempty"` - Scopes map[string]databaseRootScope `json:"scopes,omitempty"` // stats for each scope/collection + DBName string `json:"db_name"` + SequenceNumber *uint64 `json:"update_seq,omitempty"` // The last sequence written to the _default collection, if not running with multiple collections. 
+ CommittedUpdateSequenceNumber *uint64 `json:"committed_update_seq,omitempty"` // Same as above - Used by perf tests, shouldn't be removed + InstanceStartTimeMicro int64 `json:"instance_start_time"` // microseconds since epoch + CompactRunning bool `json:"compact_running"` + PurgeSequenceNumber uint64 `json:"purge_seq"` + DiskFormatVersion uint64 `json:"disk_format_version"` + State string `json:"state"` + ServerUUID string `json:"server_uuid,omitempty"` + RequireResync []string `json:"require_resync,omitempty"` } type dbSummary struct { @@ -413,33 +412,22 @@ type dbSummary struct { State string `json:"state"` } -type databaseRootScope struct { - Collections map[string]databaseRootCollection `json:"collections,omitempty"` -} - -type databaseRootCollection struct { - SequenceNumber uint64 `json:"update_seq"` // The last sequence written for this collection -} - func (h *handler) handleGetDB() error { if h.rq.Method == "HEAD" { return nil } - // TODO: If running with multiple collections, leave nil - var defaultCollectionLastSeq *uint64 - // Don't bother trying to lookup LastSequence() if offline + var lastSeq uint64 runState := db.RunStateString[atomic.LoadUint32(&h.db.State)] if runState != db.RunStateString[db.DBOffline] { - lastSeq, _ := h.db.LastSequence(h.ctx()) - defaultCollectionLastSeq = &lastSeq + lastSeq, _ = h.db.LastSequence(h.ctx()) } var response = DatabaseRoot{ DBName: h.db.Name, - SequenceNumber: defaultCollectionLastSeq, - CommittedUpdateSequenceNumber: defaultCollectionLastSeq, + SequenceNumber: &lastSeq, + CommittedUpdateSequenceNumber: &lastSeq, InstanceStartTimeMicro: h.instanceStartTimeMicro(), CompactRunning: h.db.IsCompactRunning(), PurgeSequenceNumber: 0, // TODO: Should track this value @@ -447,16 +435,6 @@ func (h *handler) handleGetDB() error { State: runState, ServerUUID: h.db.DatabaseContext.ServerUUID, RequireResync: h.db.RequireResync.ScopeAndCollectionNames(), - - // TODO: If running with multiple scope/collections - // Scopes: 
map[string]databaseRootScope{ - // "scope1": { - // Collections: map[string]databaseRootCollection{ - // "collection1": {SequenceNumber: 123456}, - // "collection2": {SequenceNumber: 987654}, - // }, - // }, - // }, } h.writeJSON(response) From 9daa699e2892160678d082861a7e2ea141531c73 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Mon, 6 Nov 2023 12:17:42 -0500 Subject: [PATCH 03/14] CBG-1966 enable staticcheck (#6556) --- .golangci-strict.yml | 3 +-- auth/auth_test.go | 2 +- auth/oidc_test.go | 7 ++----- auth/password_hash_test.go | 29 ++++++++++++++++------------- base/collection.go | 4 ---- base/dcp_sharded.go | 4 ++-- base/leaky_bucket.go | 3 +-- base/logger_external.go | 3 ++- base/logging_test.go | 7 ++----- base/util_testing.go | 15 +++++++++------ db/active_replicator_pull.go | 8 -------- db/active_replicator_push.go | 8 -------- db/attachment_test.go | 10 +++------- db/crud.go | 3 --- db/crud_test.go | 1 + db/database_test.go | 2 +- db/sg_replicate_cfg.go | 5 ++--- db/sg_replicate_cfg_test.go | 2 +- main.go | 7 ------- rest/api_test.go | 2 +- rest/bulk_api.go | 6 +----- rest/config_test.go | 2 +- rest/multipart_test.go | 4 +--- 23 files changed, 48 insertions(+), 89 deletions(-) diff --git a/.golangci-strict.yml b/.golangci-strict.yml index ea0427391b..c01f53b37c 100644 --- a/.golangci-strict.yml +++ b/.golangci-strict.yml @@ -24,7 +24,7 @@ linters: #- nakedret # Finds naked returns in functions greater than a specified function length #- prealloc # Finds slice declarations that could potentially be preallocated #- revive # Golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes - #- staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks + - staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - unconvert # Remove unnecessary type conversions #- unparam # Reports unused function parameters @@ -67,7 +67,6 @@ linters: - nakedret # Finds naked returns in functions greater than a specified function length - prealloc # Finds slice declarations that could potentially be preallocated - revive # Golint differs from gofmt. Gofmt reformats Go source code, whereas golint prints out style mistakes - - staticcheck # (megacheck) Staticcheck is a go vet on steroids, applying a ton of static analysis checks - structcheck # Finds unused struct fields - unparam # Reports unused function parameters - varcheck # Finds unused global variables and constants diff --git a/auth/auth_test.go b/auth/auth_test.go index b69c35ce9c..b34d0f7c13 100644 --- a/auth/auth_test.go +++ b/auth/auth_test.go @@ -721,7 +721,7 @@ func TestConcurrentUserWrites(t *testing.T) { } // Retrieve user to trigger initial calculation of roles, channels - user, getErr := auth.GetUser(username) + _, getErr := auth.GetUser(username) require.NoError(t, getErr, "Error retrieving user") require.NoError(t, auth.SetBcryptCost(DefaultBcryptCost)) diff --git a/auth/oidc_test.go b/auth/oidc_test.go index 7c95030262..1d5930fa7a 100644 --- a/auth/oidc_test.go +++ b/auth/oidc_test.go @@ -1205,9 +1205,8 @@ func TestJWTRolesChannels(t *testing.T) { for i, login := range tc.logins { var ( - user User - err error - lastUpdateTime time.Time + user User + err error ) if i == 0 { user, err = auth.NewUser(testUserPrefix+"_"+testSubject, "test", base.SetFromArray(login.explicitChannels)) @@ -1254,8 +1253,6 @@ func TestJWTRolesChannels(t *testing.T) { } 
require.Equal(t, base.SetFromArray(login.expectedChannels), user.Channels().AsSet()) - require.Greater(t, user.JWTLastUpdated(), lastUpdateTime) - lastUpdateTime = user.JWTLastUpdated() } }) } diff --git a/auth/password_hash_test.go b/auth/password_hash_test.go index 0dacc1a04c..1180402c97 100644 --- a/auth/password_hash_test.go +++ b/auth/password_hash_test.go @@ -35,27 +35,30 @@ func BenchmarkBcryptCostTimes(b *testing.B) { for i := minCostToTest; i < maxCostToTest; i++ { b.Run(fmt.Sprintf("cost%d", i), func(bn *testing.B) { - bn.N = 1 _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), i) assert.NoError(bn, err) }) } } -// TestBcryptDefaultCostTime will ensure that the default bcrypt cost takes at least a 'reasonable' amount of time -// If this test fails, it suggests maybe we need to think about increasing the default cost... -func TestBcryptDefaultCostTime(t *testing.T) { - // Modest 2.2GHz macbook i7 takes ~80ms at cost 10 - // Assume server CPUs are ~2x faster - minimumDuration := 40 * time.Millisecond +// TestBcryptCostTimes will output the time it takes to hash a password with each bcrypt cost value +func TestBcryptCostTimes(t *testing.T) { + // Little value in running this regularly. 
Might be useful for one-off informational purposes + t.Skip("Test disabled") - startTime := time.Now() - _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), DefaultBcryptCost) - duration := time.Since(startTime) + minCostToTest := bcrypt.DefaultCost + maxCostToTest := bcrypt.DefaultCost + 5 - t.Logf("bcrypt.GenerateFromPassword with cost %d took: %v", DefaultBcryptCost, duration) - assert.NoError(t, err) - assert.True(t, minimumDuration < duration) + for i := minCostToTest; i < maxCostToTest; i++ { + t.Run(fmt.Sprintf("cost%d", i), func(t *testing.T) { + startTime := time.Now() + _, err := bcrypt.GenerateFromPassword([]byte("hunter2"), i) + duration := time.Since(startTime) + + t.Logf("bcrypt.GenerateFromPassword with cost %d took: %v", i, duration) + assert.NoError(t, err) + }) + } } func TestSetBcryptCost(t *testing.T) { diff --git a/base/collection.go b/base/collection.go index fee0fcb8ef..35511d841d 100644 --- a/base/collection.go +++ b/base/collection.go @@ -63,10 +63,6 @@ func GetGoCBv2Bucket(ctx context.Context, spec BucketSpec) (*GocbV2Bucket, error RetryStrategy: gocb.NewBestEffortRetryStrategy(nil), } - if spec.KvPoolSize > 0 { - // TODO: Equivalent of kvPoolSize in gocb v2? - } - cluster, err := gocb.Connect(connString, clusterOptions) if err != nil { InfofCtx(ctx, KeyAuth, "Unable to connect to cluster: %v", err) diff --git a/base/dcp_sharded.go b/base/dcp_sharded.go index 97f2e6952a..bdd915eb4f 100644 --- a/base/dcp_sharded.go +++ b/base/dcp_sharded.go @@ -678,11 +678,11 @@ func (meh *sgMgrEventHandlers) OnUnregisterPIndex(pindex *cbgt.PIndex) { // OnFeedError is required to trigger reconnection to a feed on a closed connection (EOF). // NotifyMgrOnClose will trigger cbgt closing and then attempt to reconnect to the feed, if the manager hasn't // been stopped. 
-func (meh *sgMgrEventHandlers) OnFeedError(srcType string, r cbgt.Feed, feedErr error) { +func (meh *sgMgrEventHandlers) OnFeedError(_ string, r cbgt.Feed, feedErr error) { // cbgt always passes srcType = SOURCE_GOCBCORE, but we have a wrapped type associated with our indexes - use that instead // for our logging - srcType = SOURCE_DCP_SG + srcType := SOURCE_DCP_SG var bucketName, bucketUUID string dcpFeed, ok := r.(cbgt.FeedEx) if ok { diff --git a/base/leaky_bucket.go b/base/leaky_bucket.go index 7a9ddb921c..20ef31791a 100644 --- a/base/leaky_bucket.go +++ b/base/leaky_bucket.go @@ -358,8 +358,7 @@ func dedupeTapEvents(tapEvents []sgbucket.FeedEvent) []sgbucket.FeedEvent { // sequence order as read off the feed. deduped := []sgbucket.FeedEvent{} for _, tapEvent := range tapEvents { - key := string(tapEvent.Key) - latestTapEventForKey := latestTapEventPerKey[key] + latestTapEventForKey := latestTapEventPerKey[string(tapEvent.Key)] if tapEvent.Cas == latestTapEventForKey.Cas { deduped = append(deduped, tapEvent) } diff --git a/base/logger_external.go b/base/logger_external.go index 1589f0128f..1130ee917d 100644 --- a/base/logger_external.go +++ b/base/logger_external.go @@ -46,7 +46,8 @@ func initExternalLoggers() { } func updateExternalLoggers() { - if consoleLogger != nil && consoleLogger.shouldLog(nil, LevelDebug, KeyWalrus) { + // use context.Background() since this is called from init or to reset test logging + if consoleLogger != nil && consoleLogger.shouldLog(context.Background(), LevelDebug, KeyWalrus) { rosmar.SetLogLevel(rosmar.LevelDebug) } else { rosmar.SetLogLevel(rosmar.LevelInfo) diff --git a/base/logging_test.go b/base/logging_test.go index c88684a6c7..fd50db33a0 100644 --- a/base/logging_test.go +++ b/base/logging_test.go @@ -11,7 +11,6 @@ package base import ( "bytes" "fmt" - "math/rand" "os" "path/filepath" "runtime" @@ -79,9 +78,7 @@ func BenchmarkLogRotation(b *testing.B) { for _, test := range tests { 
b.Run(fmt.Sprintf("rotate:%t-compress:%t-bytes:%v", test.rotate, test.compress, test.numBytes), func(bm *testing.B) { - data := make([]byte, test.numBytes) - _, err := rand.Read(data) - require.NoError(bm, err) + data := FastRandBytes(bm, test.numBytes) logPath := b.TempDir() logger := lumberjack.Logger{Filename: filepath.Join(logPath, "output.log"), Compress: test.compress} @@ -99,7 +96,7 @@ func BenchmarkLogRotation(b *testing.B) { // we can't remove temp dir while the async compression is still writing log files assert.NoError(bm, logger.Close()) ctx := TestCtx(bm) - err, _ = RetryLoop(ctx, "benchmark-logrotate-teardown", + err, _ := RetryLoop(ctx, "benchmark-logrotate-teardown", func() (shouldRetry bool, err error, value interface{}) { err = os.RemoveAll(logPath) return err != nil, err, nil diff --git a/base/util_testing.go b/base/util_testing.go index bdae598540..5bfa416f93 100644 --- a/base/util_testing.go +++ b/base/util_testing.go @@ -42,12 +42,6 @@ import ( var TestExternalRevStorage = false -func init() { - - // Prevent https://issues.couchbase.com/browse/MB-24237 - rand.Seed(time.Now().UTC().UnixNano()) -} - type TestBucket struct { Bucket BucketSpec BucketSpec @@ -929,3 +923,12 @@ func MoveDocument(t testing.TB, docID string, dst, src DataStore) { _, err = src.Remove(docID, srcCAS) require.NoError(t, err) } + +// FastRandBytes returns a set of random bytes. Uses a low quality random generator. 
+func FastRandBytes(t testing.TB, size int) []byte { + b := make([]byte, size) + // staticcheck wants to use crypto/rand as math/rand is deprecated in go 1.20, but we don't need that for testing + _, err := rand.Read(b) // nolint:staticcheck + require.NoError(t, err) + return b +} diff --git a/db/active_replicator_pull.go b/db/active_replicator_pull.go index a6b9c7e105..c5f94c140c 100644 --- a/db/active_replicator_pull.go +++ b/db/active_replicator_pull.go @@ -40,10 +40,6 @@ func (apr *ActivePullReplicator) Start(ctx context.Context) error { apr.lock.Lock() defer apr.lock.Unlock() - if apr == nil { - return fmt.Errorf("nil ActivePullReplicator, can't start") - } - if apr.ctx != nil && apr.ctx.Err() == nil { return fmt.Errorf("ActivePullReplicator already running") } @@ -158,10 +154,6 @@ func (apr *ActivePullReplicator) _subChanges(collectionIdx *int, since string) e func (apr *ActivePullReplicator) Complete() { base.TracefCtx(apr.ctx, base.KeyReplicate, "ActivePullReplicator.Complete()") apr.lock.Lock() - if apr == nil { - apr.lock.Unlock() - return - } _ = apr.forEachCollection(func(c *activeReplicatorCollection) error { base.TracefCtx(apr.ctx, base.KeyReplicate, "Before calling waitForExpectedSequences in Complete()") if err := c.Checkpointer.waitForExpectedSequences(); err != nil { diff --git a/db/active_replicator_push.go b/db/active_replicator_push.go index 7dac4886d4..a6a21f1431 100644 --- a/db/active_replicator_push.go +++ b/db/active_replicator_push.go @@ -44,10 +44,6 @@ func (apr *ActivePushReplicator) Start(ctx context.Context) error { apr.lock.Lock() defer apr.lock.Unlock() - if apr == nil { - return fmt.Errorf("nil ActivePushReplicator, can't start") - } - if apr.ctx != nil && apr.ctx.Err() == nil { return fmt.Errorf("ActivePushReplicator already running") } @@ -111,10 +107,6 @@ func (apr *ActivePushReplicator) _connect() error { func (apr *ActivePushReplicator) Complete() { base.TracefCtx(apr.ctx, base.KeyReplicate, "ActivePushReplicator.Complete()") 
apr.lock.Lock() - if apr == nil { - apr.lock.Unlock() - return - } // Wait for any pending changes responses to arrive and be processed err := apr._waitForPendingChangesResponse() diff --git a/db/attachment_test.go b/db/attachment_test.go index 60042bd63e..a29ccfa7f4 100644 --- a/db/attachment_test.go +++ b/db/attachment_test.go @@ -15,7 +15,6 @@ import ( "errors" "fmt" "log" - "math/rand" "net/http" "strconv" "strings" @@ -1509,12 +1508,9 @@ func TestLargeAttachments(t *testing.T) { defer db.Close(ctx) collection := GetSingleDatabaseCollectionWithUser(t, db) - normalAttachment := make([]byte, 15*1024*1024) // permissible size - oversizeAttachment := make([]byte, 25*1024*1024) // memcached would send an E2BIG - hugeAttachment := make([]byte, 35*1024*1024) // memcached would abruptly close our connection - _, _ = rand.Read(normalAttachment) - _, _ = rand.Read(oversizeAttachment) - _, _ = rand.Read(hugeAttachment) + normalAttachment := base.FastRandBytes(t, 15*1024*1024) // permissible size + oversizeAttachment := base.FastRandBytes(t, 25*1024*1024) // memcached would send an E2BIG + hugeAttachment := base.FastRandBytes(t, 35*1024*1024) // memcached would abruptly close our connection _, _, err := collection.Put(ctx, "testdoc", Body{ "_attachments": AttachmentsMeta{ diff --git a/db/crud.go b/db/crud.go index 75054fd3c2..586a9ca03d 100644 --- a/db/crud.go +++ b/db/crud.go @@ -413,9 +413,6 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR // db.DbStats.StatsDeltaSync().Add(base.StatKeyDeltaCacheHits, 1) db.dbStats().DeltaSync().DeltaCacheHit.Add(1) return fromRevision.Delta, nil, nil - } else { - // TODO: Recurse and merge deltas when gen(revCacheDelta.toRevID) < gen(toRevId) - // until then, fall through to generating delta for given rev pair } } diff --git a/db/crud_test.go b/db/crud_test.go index c3c9fcb10d..ea8f99f355 100644 --- a/db/crud_test.go +++ b/db/crud_test.go @@ -1360,6 +1360,7 @@ func TestGet1xRevFromDoc(t *testing.T) { 
// Get the document body bytes with the tombstone revision rev3, with listRevisions=true // Also validate that the BodyRevisions property is present and correct. bodyBytes, removed, err = collection.get1xRevFromDoc(ctx, doc, rev3, true) + require.NoError(t, err) assert.NotEmpty(t, bodyBytes, "Document body bytes should be returned") assert.False(t, removed, "This shouldn't be a removed document") assert.NoError(t, response.Unmarshal(bodyBytes)) diff --git a/db/database_test.go b/db/database_test.go index 3b938b25a2..f34123e791 100644 --- a/db/database_test.go +++ b/db/database_test.go @@ -3063,8 +3063,8 @@ func TestGetDatabaseCollectionWithUserDefaultCollection(t *testing.T) { require.NoError(t, err) db, err := GetDatabase(dbCtx, nil) - defer db.Close(ctx) require.NoError(t, err) + defer db.Close(ctx) col, err := db.GetDatabaseCollectionWithUser(testCase.scope, testCase.collection) if testCase.err { require.Error(t, err) diff --git a/db/sg_replicate_cfg.go b/db/sg_replicate_cfg.go index f5932cd2f1..b05fec92a6 100644 --- a/db/sg_replicate_cfg.go +++ b/db/sg_replicate_cfg.go @@ -803,10 +803,9 @@ func (m *sgReplicateManager) RefreshReplicationCfg(ctx context.Context) error { // Check for replications newly assigned to this node for replicationID, replicationCfg := range configReplications { if replicationCfg.AssignedNode == m.localNodeUUID { - replicator, exists := m.activeReplicators[replicationID] + _, exists := m.activeReplicators[replicationID] if !exists { - var initError error - replicator, initError = m.InitializeReplication(replicationCfg) + replicator, initError := m.InitializeReplication(replicationCfg) if initError != nil { base.WarnfCtx(m.loggingCtx, "Error initializing replication %s: %v", initError) continue diff --git a/db/sg_replicate_cfg_test.go b/db/sg_replicate_cfg_test.go index 87ae458cc0..7310b92a65 100644 --- a/db/sg_replicate_cfg_test.go +++ b/db/sg_replicate_cfg_test.go @@ -43,7 +43,7 @@ func TestReplicateManagerReplications(t *testing.T) { 
assert.Equal(t, replication1_id, r.ID) // Request non-existent replication - r, err = manager.GetReplication("dne") + _, err = manager.GetReplication("dne") require.Error(t, err, base.ErrNotFound) // Attempt to add existing replication diff --git a/main.go b/main.go index 1483f3b608..6630a16840 100644 --- a/main.go +++ b/main.go @@ -9,16 +9,9 @@ package main import ( - "math/rand" - "time" - "github.com/couchbase/sync_gateway/rest" ) -func init() { - rand.Seed(time.Now().UTC().UnixNano()) -} - // Simple Sync Gateway launcher tool. func main() { rest.ServerMain() diff --git a/rest/api_test.go b/rest/api_test.go index ea25a09857..377e8b06d8 100644 --- a/rest/api_test.go +++ b/rest/api_test.go @@ -1700,8 +1700,8 @@ func TestLongpollWithWildcard(t *testing.T) { // has a wait counter of zero (no documents writted since the listener was restarted). wg := sync.WaitGroup{} // start changes request + wg.Add(1) go func() { - wg.Add(1) defer wg.Done() changesJSON := `{"style":"all_docs", "heartbeat":300000, "feed":"longpoll", "limit":50, "since":"0"}` changesResponse := rt.SendUserRequest("POST", "/{{.keyspace}}/_changes", changesJSON, "bernard") diff --git a/rest/bulk_api.go b/rest/bulk_api.go index a5543c7b07..83358e7c49 100644 --- a/rest/bulk_api.go +++ b/rest/bulk_api.go @@ -264,9 +264,7 @@ func (h *handler) handleDump() error { func (h *handler) handleRepair() error { // TODO: If repair is re-enabled, it may need to be modified to support xattrs and GSI - if true == true { - return errors.New("_repair endpoint disabled") - } + return errors.New("_repair endpoint disabled") /*base.InfofCtx(h.ctx(), base.KeyHTTP, "Repair bucket") @@ -305,8 +303,6 @@ func (h *handler) handleRepair() error { return err */ - - return nil } // HTTP handler for _dumpchannel diff --git a/rest/config_test.go b/rest/config_test.go index 1a11dc2f29..9302e2d966 100644 --- a/rest/config_test.go +++ b/rest/config_test.go @@ -1431,8 +1431,8 @@ func TestSetupServerContext(t *testing.T) { 
config.Bootstrap.Password = base.TestClusterPassword() ctx := base.TestCtx(t) sc, err := SetupServerContext(ctx, &config, false) - defer sc.Close(ctx) require.NoError(t, err) + defer sc.Close(ctx) require.NotNil(t, sc) }) } diff --git a/rest/multipart_test.go b/rest/multipart_test.go index 8422e7a1d1..4647308167 100644 --- a/rest/multipart_test.go +++ b/rest/multipart_test.go @@ -16,7 +16,6 @@ import ( "fmt" "io" "log" - "math/rand" "mime/multipart" "net/http" "strconv" @@ -162,8 +161,7 @@ func TestWriteJSONPart(t *testing.T) { // writeJSONPart toggles compression to false if the incoming body is less than 300 bytes, so creating // a body larger than 300 bytes to test writeJSONPart with compression=true and compression=false mockFakeBody := func() db.Body { - bytes := make([]byte, 139) - rand.Read(bytes) + bytes := base.FastRandBytes(t, 139) value := fmt.Sprintf("%x", bytes) return db.Body{"key": "foo", "value": value} } From 2fe011c7c088ea1717050dbba417f1dbf91bdde8 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Tue, 7 Nov 2023 09:35:34 -0500 Subject: [PATCH 04/14] CBG-3585 log bucket and groupID during config search (#6564) --- rest/config.go | 16 ++++++++-------- rest/server_context.go | 6 +++--- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/rest/config.go b/rest/config.go index 6c2bd87d8c..8e10fc925b 100644 --- a/rest/config.go +++ b/rest/config.go @@ -1467,11 +1467,11 @@ func (sc *ServerContext) migrateV30Configs(ctx context.Context) error { continue } - base.InfofCtx(ctx, base.KeyConfig, "Found legacy persisted config for database %s - migrating to db registry.", base.MD(dbConfig.Name)) + base.InfofCtx(ctx, base.KeyConfig, "Found legacy persisted config for database %s in bucket %s, groupID %s - migrating to db registry.", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID)) _, insertErr := sc.BootstrapContext.InsertConfig(ctx, bucketName, groupID, &dbConfig) if insertErr != nil { if insertErr == base.ErrAlreadyExists { - 
base.DebugfCtx(ctx, base.KeyConfig, "Found legacy config for database %s, but already exists in registry.", base.MD(dbConfig.Name)) + base.DebugfCtx(ctx, base.KeyConfig, "Found legacy config for database %s in bucket %s, groupID: %s, but already exists in registry.", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID)) } else { base.InfofCtx(ctx, base.KeyConfig, "Unable to persist migrated v3.0 config for bucket %s groupID %s: %s", base.MD(bucketName), base.MD(groupID), insertErr) continue @@ -1479,7 +1479,7 @@ func (sc *ServerContext) migrateV30Configs(ctx context.Context) error { } removeErr := sc.BootstrapContext.Connection.DeleteMetadataDocument(ctx, bucketName, PersistentConfigKey30(ctx, groupID), legacyCas) if removeErr != nil { - base.InfofCtx(ctx, base.KeyConfig, "Failed to remove legacy config for database %s: %s", base.MD(dbConfig.Name), removeErr) + base.InfofCtx(ctx, base.KeyConfig, "Failed to remove legacy config for database %s in bucket %s, groupID %s: %s", base.MD(dbConfig.Name), base.MD(bucketName), base.MD(groupID), base.MD(removeErr)) } } return nil @@ -1564,7 +1564,7 @@ func (sc *ServerContext) _fetchDatabase(ctx context.Context, dbName string) (fou cnf.CertPath = sc.Config.Bootstrap.X509CertPath cnf.KeyPath = sc.Config.Bootstrap.X509KeyPath } - base.TracefCtx(ctx, base.KeyConfig, "Got config for bucket %q with cas %d", bucket, cas) + base.TracefCtx(ctx, base.KeyConfig, "Got database config %s for bucket %q with cas %d and groupID %q", base.MD(dbName), base.MD(bucket), cas, base.MD(sc.Config.Bootstrap.ConfigGroupID)) return true, nil } @@ -1686,20 +1686,20 @@ func (sc *ServerContext) FetchConfigs(ctx context.Context, isInitialStartup bool fetchedConfigs := make(map[string]DatabaseConfig, len(buckets)) for _, bucket := range buckets { - base.TracefCtx(ctx, base.KeyConfig, "Checking for configs for group %q from bucket %q", sc.Config.Bootstrap.ConfigGroupID, bucket) + base.TracefCtx(ctx, base.KeyConfig, "Checking for configs for group %q 
from bucket %q", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket)) configs, err := sc.BootstrapContext.GetDatabaseConfigs(ctx, bucket, sc.Config.Bootstrap.ConfigGroupID) if err != nil { // Unexpected error fetching config - SDK has already performed retries, so we'll treat it as a registry removal // this could be due to invalid JSON or some other non-recoverable error. if isInitialStartup { - base.WarnfCtx(ctx, "Unable to fetch config for group %q from bucket %q on startup: %v", sc.Config.Bootstrap.ConfigGroupID, bucket, err) + base.WarnfCtx(ctx, "Unable to fetch configs for group %q from bucket %q on startup: %v", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket), err) } else { - base.DebugfCtx(ctx, base.KeyConfig, "Unable to fetch config for group %q from bucket %q: %v", sc.Config.Bootstrap.ConfigGroupID, bucket, err) + base.DebugfCtx(ctx, base.KeyConfig, "Unable to fetch configs for group %q from bucket %q: %v", sc.Config.Bootstrap.ConfigGroupID, base.MD(bucket), err) } continue } if len(configs) == 0 { - base.DebugfCtx(ctx, base.KeyConfig, "Bucket %q did not contain config for group %q", bucket, sc.Config.Bootstrap.ConfigGroupID) + base.DebugfCtx(ctx, base.KeyConfig, "Bucket %q did not contain any configs for group %q", base.MD(bucket), sc.Config.Bootstrap.ConfigGroupID) continue } for _, cnf := range configs { diff --git a/rest/server_context.go b/rest/server_context.go index 09f0902b9f..a3a0c9321a 100644 --- a/rest/server_context.go +++ b/rest/server_context.go @@ -2060,7 +2060,7 @@ func (sc *ServerContext) initializeCouchbaseServerConnections(ctx context.Contex } if count > 0 { - base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs from buckets in cluster", count) + base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs for group %q from buckets in cluster", count, sc.Config.Bootstrap.ConfigGroupID) } else { base.WarnfCtx(ctx, "Config: No database configs for group %q. 
Continuing startup to allow REST API database creation", sc.Config.Bootstrap.ConfigGroupID) } @@ -2083,10 +2083,10 @@ func (sc *ServerContext) initializeCouchbaseServerConnections(ctx context.Contex base.DebugfCtx(ctx, base.KeyConfig, "Fetching configs from buckets in cluster for group %q", sc.Config.Bootstrap.ConfigGroupID) count, err := sc.fetchAndLoadConfigs(ctx, false) if err != nil { - base.WarnfCtx(ctx, "Couldn't load configs from bucket when polled: %v", err) + base.WarnfCtx(ctx, "Couldn't load configs from bucket for group %q when polled: %v", sc.Config.Bootstrap.ConfigGroupID, err) } if count > 0 { - base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs from buckets in cluster", count) + base.InfofCtx(ctx, base.KeyConfig, "Successfully fetched %d database configs for group %d from buckets in cluster", count, sc.Config.Bootstrap.ConfigGroupID) } } } From 4508380620a086ad4ab885792eba20345581c225 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Wed, 8 Nov 2023 05:17:39 -0500 Subject: [PATCH 05/14] CBG-3498 make test pass without named collections (#6568) --- rest/oidc_api_test.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/rest/oidc_api_test.go b/rest/oidc_api_test.go index e063c770e8..ff057a1727 100644 --- a/rest/oidc_api_test.go +++ b/rest/oidc_api_test.go @@ -2451,10 +2451,6 @@ func mustMarshalJSON(t testing.TB, val interface{}) []byte { // Checks that we correctly handle the removal of an OIDC provider while it's in use func TestOpenIDConnectProviderRemoval(t *testing.T) { - if base.UnitTestUrlIsWalrus() { - // Requires persistent config - t.Skip("This test only works against Couchbase Server") - } const ( providerName = "foo" @@ -2535,7 +2531,13 @@ func TestOpenIDConnectProviderRemoval(t *testing.T) { UserCtx db.Body `json:"userCtx"` } require.NoError(t, base.JSONUnmarshal(res.Body.Bytes(), &sessionResponse)) - require.Nil(t, sessionResponse.UserCtx["channels"]) + // session response only contains 
non collection channels, and is blank if there is no default collection + if base.TestsUseNamedCollections() { + require.Nil(t, sessionResponse.UserCtx["channels"]) + } else { + require.NotContains(t, sessionResponse.UserCtx["channels"], testChannelName) + + } } // This test verifies the edge case of having two different OIDC providers with different role/channel configurations From 6323ac9fc68bc561bd95854bf8b87564804f05a0 Mon Sep 17 00:00:00 2001 From: Ben Brooks Date: Wed, 8 Nov 2023 16:36:36 +0000 Subject: [PATCH 06/14] Bump Go version to 1.21.4 (#6569) --- .github/workflows/ci.yml | 12 ++++++------ Jenkinsfile | 2 +- manifest/product-config.json | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d334b2aca4..b7e6c1e5ae 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: go-build run: go build "./..." @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - run: go install github.com/google/addlicense@latest - run: addlicense -check -f licenses/addlicense.tmpl . @@ -49,7 +49,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 cache: false - name: golangci-lint uses: golangci/golangci-lint-action@v3 @@ -77,7 +77,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Build run: go build -v "./..." - name: Run Tests @@ -97,7 +97,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Run Tests run: go test -race -timeout=30m -count=1 -json -v "./..." | tee test.json | jq -s -jr 'sort_by(.Package,.Time) | .[].Output | select (. 
!= null )' shell: bash @@ -137,7 +137,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: 1.21.3 + go-version: 1.21.4 - name: Build run: go build -v "./tools/stats-definition-exporter" - name: Run Tests diff --git a/Jenkinsfile b/Jenkinsfile index 491f8dacf5..58d4f0da3c 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,7 +16,7 @@ pipeline { } tools { - go '1.21.3' + go '1.21.4' } stages { diff --git a/manifest/product-config.json b/manifest/product-config.json index 5314fb9eae..22097f880f 100644 --- a/manifest/product-config.json +++ b/manifest/product-config.json @@ -6,7 +6,7 @@ "release_name": "Couchbase Sync Gateway", "production": true, "interval": 30, - "go_version": "1.21.3", + "go_version": "1.21.4", "trigger_blackduck": true, "start_build": 1 }, From a398881cc0e8a3d1ac652b0d03f67897a52772b6 Mon Sep 17 00:00:00 2001 From: Gregory Newman-Smith <109068393+gregns1@users.noreply.github.com> Date: Fri, 10 Nov 2023 17:08:41 +0000 Subject: [PATCH 07/14] CBG-3576: BlipTestClient support for HLV and rev tree modes (#6567) * CBG-3210: Updating HLV on Put And PutExistingRev (#6366) * CBG-3209: Add cv index and retrieval for revision cache (#6491) * CBG-3209: changes for retreival of a doc from the rev cache via CV with backwards compatability in mind * fix failing test, add commnets * fix lint * updated to address comments * rebase chnages needed * updated to tests that call Get on revision cache * updates based of new direction with PR + addressing comments * updated to fix panic * updated to fix another panic * address comments * updates based off commnets * remove commnented out line * updates to skip test relying on import and update PutExistingRev doc update type to update HLV * updates to remove code adding rev id to value inside addToRevMapPostLoad. 
Added code to assign this inside value.store * remove redundent code * CBG-3210: Updating HLV on Put And PutExistingRev (#6366) * CBG-3209: Add cv index and retrieval for revision cache (#6491) * CBG-3209: changes for retreival of a doc from the rev cache via CV with backwards compatability in mind * fix failing test, add commnets * fix lint * updated to address comments * rebase chnages needed * updated to tests that call Get on revision cache * updates based of new direction with PR + addressing comments * updated to fix panic * updated to fix another panic * address comments * updates based off commnets * remove commnented out line * updates to skip test relying on import and update PutExistingRev doc update type to update HLV * updates to remove code adding rev id to value inside addToRevMapPostLoad. Added code to assign this inside value.store * remove redundent code * CBG-3576: changes to BlipTesterClient to run with version vector subprotocol and non version vector subprotocol * updates to work with rebase changes * changes to remove repeated code, assuming from the rebase? 
* refactoring based off discussion with Ben * lint error fix --- db/access_test.go | 8 +- db/attachment_test.go | 28 +- db/blip_handler.go | 4 +- db/change_cache.go | 2 +- db/changes_test.go | 4 +- db/crud.go | 90 +- db/crud_test.go | 124 +-- db/database.go | 16 + db/database_test.go | 216 ++-- db/document.go | 82 +- db/document_test.go | 101 ++ db/hybrid_logical_vector.go | 49 +- db/import.go | 3 +- db/query_test.go | 30 +- db/revision_cache_bypass.go | 40 +- db/revision_cache_interface.go | 100 +- db/revision_cache_lru.go | 286 ++++- db/revision_cache_test.go | 381 ++++++- db/revision_test.go | 2 +- rest/api_test.go | 91 ++ rest/attachment_test.go | 367 ++++--- rest/blip_api_attachment_test.go | 891 ++++++++-------- rest/blip_api_collections_test.go | 552 +++++----- rest/blip_api_crud_test.go | 962 +++++------------ rest/blip_api_delta_sync_test.go | 1343 ++++++++++++------------ rest/blip_api_no_race_test.go | 123 +-- rest/blip_client_test.go | 209 ++-- rest/bulk_api.go | 2 +- rest/doc_api.go | 4 +- rest/importtest/import_test.go | 3 + rest/replicatortest/replicator_test.go | 43 + rest/revocation_test.go | 448 ++++---- rest/user_api_test.go | 4 +- 33 files changed, 3704 insertions(+), 2904 deletions(-) diff --git a/db/access_test.go b/db/access_test.go index 9b23710cb5..48ee595fc7 100644 --- a/db/access_test.go +++ b/db/access_test.go @@ -44,7 +44,7 @@ func TestDynamicChannelGrant(t *testing.T) { // Create a document in channel chan1 doc1Body := Body{"channel": "chan1", "greeting": "hello"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc1", doc1Body, []string{"1-a"}, false) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc1", doc1Body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err) // Verify user cannot access document @@ -54,7 +54,7 @@ func TestDynamicChannelGrant(t *testing.T) { // Write access granting document grantingBody := Body{"type": "setaccess", "owner": "user1", "channel": "chan1"} - _, _, err = 
dbCollection.PutExistingRevWithBody(ctx, "grant1", grantingBody, []string{"1-a"}, false) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant1", grantingBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err) // Verify reloaded user can access document @@ -66,12 +66,12 @@ func TestDynamicChannelGrant(t *testing.T) { // Create a document in channel chan2 doc2Body := Body{"channel": "chan2", "greeting": "hello"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc2", doc2Body, []string{"1-a"}, false) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc2", doc2Body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err) // Write access granting document for chan2 (tests invalidation when channels/inval_seq exists) grantingBody = Body{"type": "setaccess", "owner": "user1", "channel": "chan2"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant2", grantingBody, []string{"1-a"}, false) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant2", grantingBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err) // Verify user can now access both documents diff --git a/db/attachment_test.go b/db/attachment_test.go index a29ccfa7f4..4574603f20 100644 --- a/db/attachment_test.go +++ b/db/attachment_test.go @@ -72,7 +72,7 @@ func TestBackupOldRevisionWithAttachments(t *testing.T) { var rev2Body Body rev2Data := `{"test": true, "updated": true, "_attachments": {"hello.txt": {"stub": true, "revpos": 1}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, docID, rev2Body, []string{"2-abc", rev1ID}, true) + _, _, err = collection.PutExistingRevWithBody(ctx, docID, rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) rev2ID := "2-abc" @@ -200,7 +200,7 @@ func TestAttachments(t *testing.T) { rev2Bstr := `{"_attachments": {"bye.txt": 
{"stub":true,"revpos":1,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}, "_rev": "2-f000"}` var body2B Body assert.NoError(t, base.JSONUnmarshal([]byte(rev2Bstr), &body2B)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body2B, []string{"2-f000", rev1id}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body2B, []string{"2-f000", rev1id}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't update document") } @@ -284,7 +284,7 @@ func TestAttachmentCASRetryAfterNewAttachment(t *testing.T) { rev2Data := `{"prop1":"value2", "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) log.Printf("Done creating rev 2 for key %s", key) @@ -315,7 +315,7 @@ func TestAttachmentCASRetryAfterNewAttachment(t *testing.T) { var rev3Body Body rev3Data := `{"prop1":"value3", "_attachments": {"hello.txt": {"revpos":2,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev3Data), &rev3Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) log.Printf("rev 3 done") @@ -347,7 +347,7 @@ func TestAttachmentCASRetryDuringNewAttachment(t *testing.T) { rev2Data := `{"prop1":"value2"}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", 
rev2Body, []string{"2-abc", rev1ID}, true) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) log.Printf("Done creating rev 2 for key %s", key) @@ -378,7 +378,7 @@ func TestAttachmentCASRetryDuringNewAttachment(t *testing.T) { var rev3Body Body rev3Data := `{"prop1":"value3", "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev3Data), &rev3Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) log.Printf("rev 3 done") @@ -567,7 +567,7 @@ func TestRetrieveAncestorAttachments(t *testing.T) { // Create document (rev 1) text := `{"key": "value", "version": "1a"}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) - doc, revID, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) + doc, revID, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) @@ -575,49 +575,49 @@ func TestRetrieveAncestorAttachments(t *testing.T) { text = `{"key": "value", "version": "2a", "_attachments": {"att1.txt": {"data": "YXR0MS50eHQ="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "3a", "_attachments": {"att1.txt": 
{"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-a", "2-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "4a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "5a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-a", "4-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-a", "4-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "6a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"6-a", "5-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"6-a", "5-a"}, false, ExistingVersionWithUpdateToHLV) 
assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "3b", "type": "pruned"}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "3b", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) } diff --git a/db/blip_handler.go b/db/blip_handler.go index 79a21c1c8b..407a41b9bf 100644 --- a/db/blip_handler.go +++ b/db/blip_handler.go @@ -1183,9 +1183,9 @@ func (bh *blipHandler) processRev(rq *blip.Message, stats *processRevStats) (err // bh.conflictResolver != nil represents an active SGR2 and BLIPClientTypeSGR2 represents a passive SGR2 forceAllowConflictingTombstone := newDoc.Deleted && (bh.conflictResolver != nil || bh.clientType == BLIPClientTypeSGR2) if bh.conflictResolver != nil { - _, _, err = bh.collection.PutExistingRevWithConflictResolution(bh.loggingCtx, newDoc, history, true, bh.conflictResolver, forceAllowConflictingTombstone, rawBucketDoc) + _, _, err = bh.collection.PutExistingRevWithConflictResolution(bh.loggingCtx, newDoc, history, true, bh.conflictResolver, forceAllowConflictingTombstone, rawBucketDoc, ExistingVersionWithUpdateToHLV) } else { - _, _, err = 
bh.collection.PutExistingRev(bh.loggingCtx, newDoc, history, revNoConflicts, forceAllowConflictingTombstone, rawBucketDoc) + _, _, err = bh.collection.PutExistingRev(bh.loggingCtx, newDoc, history, revNoConflicts, forceAllowConflictingTombstone, rawBucketDoc, ExistingVersionWithUpdateToHLV) } if err != nil { return err diff --git a/db/change_cache.go b/db/change_cache.go index 82f779f152..8f42deeb1e 100644 --- a/db/change_cache.go +++ b/db/change_cache.go @@ -497,7 +497,7 @@ func (c *changeCache) DocChanged(event sgbucket.FeedEvent) { // Now add the entry for the new doc revision: if len(rawUserXattr) > 0 { - collection.revisionCache.Remove(docID, syncData.CurrentRev) + collection.revisionCache.RemoveWithRev(docID, syncData.CurrentRev) } change := &LogEntry{ Sequence: syncData.Sequence, diff --git a/db/changes_test.go b/db/changes_test.go index 95dc721c66..1be2a18359 100644 --- a/db/changes_test.go +++ b/db/changes_test.go @@ -478,14 +478,14 @@ func BenchmarkChangesFeedDocUnmarshalling(b *testing.B) { // Create child rev 1 docBody["child"] = "A" - _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-A", revId}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-A", revId}, false, ExistingVersionWithUpdateToHLV) if err != nil { b.Fatalf("Error creating child1 rev: %v", err) } // Create child rev 2 docBody["child"] = "B" - _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-B", revId}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-B", revId}, false, ExistingVersionWithUpdateToHLV) if err != nil { b.Fatalf("Error creating child2 rev: %v", err) } diff --git a/db/crud.go b/db/crud.go index 586a9ca03d..fabe70e218 100644 --- a/db/crud.go +++ b/db/crud.go @@ -317,7 +317,7 @@ func (db *DatabaseCollectionWithUser) getRev(ctx context.Context, docid, revid s if revid != "" { // Get a specific revision body and history from the revision cache // 
(which will load them if necessary, by calling revCacheLoader, above) - revision, err = db.revisionCache.Get(ctx, docid, revid, includeBody, RevCacheOmitDelta) + revision, err = db.revisionCache.GetWithRev(ctx, docid, revid, includeBody, RevCacheOmitDelta) } else { // No rev ID given, so load active revision revision, err = db.revisionCache.GetActive(ctx, docid, includeBody) @@ -381,7 +381,7 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR return nil, nil, nil } - fromRevision, err := db.revisionCache.Get(ctx, docID, fromRevID, RevCacheOmitBody, RevCacheIncludeDelta) + fromRevision, err := db.revisionCache.GetWithRev(ctx, docID, fromRevID, RevCacheOmitBody, RevCacheIncludeDelta) // If the fromRevision is a removal cache entry (no body), but the user has access to that removal, then just // return 404 missing to indicate that the body of the revision is no longer available. @@ -421,7 +421,7 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR // db.DbStats.StatsDeltaSync().Add(base.StatKeyDeltaCacheMisses, 1) db.dbStats().DeltaSync().DeltaCacheMiss.Add(1) - toRevision, err := db.revisionCache.Get(ctx, docID, toRevID, RevCacheOmitBody, RevCacheIncludeDelta) + toRevision, err := db.revisionCache.GetWithRev(ctx, docID, toRevID, RevCacheOmitBody, RevCacheIncludeDelta) if err != nil { return nil, nil, err } @@ -866,6 +866,33 @@ func (db *DatabaseCollectionWithUser) OnDemandImportForWrite(ctx context.Context return nil } +// updateHLV updates the HLV in the sync data appropriately based on what type of document update event we are encountering +func (db *DatabaseCollectionWithUser) updateHLV(d *Document, docUpdateEvent DocUpdateType) (*Document, error) { + + if d.HLV == nil { + d.HLV = &HybridLogicalVector{} + } + switch docUpdateEvent { + case ExistingVersion: + // preserve any other logic on the HLV that has been done by the client, only update to cvCAS will be needed + d.HLV.CurrentVersionCAS = 
hlvExpandMacroCASValue + case Import: + // work to be done to decide if the VV needs updating here, pending CBG-3503 + case NewVersion, ExistingVersionWithUpdateToHLV: + // add a new entry to the version vector + newVVEntry := CurrentVersionVector{} + newVVEntry.SourceID = db.dbCtx.BucketUUID + newVVEntry.VersionCAS = hlvExpandMacroCASValue + err := d.SyncData.HLV.AddVersion(newVVEntry) + if err != nil { + return nil, err + } + // update the cvCAS on the SGWrite event too + d.HLV.CurrentVersionCAS = hlvExpandMacroCASValue + } + return d, nil +} + // Updates or creates a document. // The new body's BodyRev property must match the current revision's, if any. func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, body Body) (newRevID string, doc *Document, err error) { @@ -905,8 +932,9 @@ func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, bod return "", nil, err } + docUpdateEvent := NewVersion allowImport := db.UseXattrs() - doc, newRevID, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, expiry, nil, nil, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + doc, newRevID, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, expiry, nil, docUpdateEvent, nil, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { var isSgWrite bool var crc32Match bool @@ -1014,8 +1042,8 @@ func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, bod } // Adds an existing revision to a document along with its history (list of rev IDs.) 
-func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, forceAllConflicts bool, existingDoc *sgbucket.BucketDocument) (doc *Document, newRevID string, err error) { - return db.PutExistingRevWithConflictResolution(ctx, newDoc, docHistory, noConflicts, nil, forceAllConflicts, existingDoc) +func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, forceAllConflicts bool, existingDoc *sgbucket.BucketDocument, docUpdateEvent DocUpdateType) (doc *Document, newRevID string, err error) { + return db.PutExistingRevWithConflictResolution(ctx, newDoc, docHistory, noConflicts, nil, forceAllConflicts, existingDoc, docUpdateEvent) } // PutExistingRevWithConflictResolution Adds an existing revision to a document along with its history (list of rev IDs.) @@ -1023,7 +1051,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc // 1. If noConflicts == false, the revision will be added to the rev tree as a conflict // 2. If noConflicts == true and a conflictResolverFunc is not provided, a 409 conflict error will be returned // 3. If noConflicts == true and a conflictResolverFunc is provided, conflicts will be resolved and the result added to the document. 
-func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, conflictResolver *ConflictResolver, forceAllowConflictingTombstone bool, existingDoc *sgbucket.BucketDocument) (doc *Document, newRevID string, err error) { +func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, conflictResolver *ConflictResolver, forceAllowConflictingTombstone bool, existingDoc *sgbucket.BucketDocument, docUpdateEvent DocUpdateType) (doc *Document, newRevID string, err error) { newRev := docHistory[0] generation, _ := ParseRevID(ctx, newRev) if generation < 0 { @@ -1031,7 +1059,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx c } allowImport := db.UseXattrs() - doc, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, newDoc.DocExpiry, nil, existingDoc, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + doc, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, newDoc.DocExpiry, nil, docUpdateEvent, existingDoc, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { // (Be careful: this block can be invoked multiple times if there are races!) 
var isSgWrite bool @@ -1130,7 +1158,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx c return doc, newRev, err } -func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context, docid string, body Body, docHistory []string, noConflicts bool) (doc *Document, newRev string, err error) { +func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context, docid string, body Body, docHistory []string, noConflicts bool, docUpdateEvent DocUpdateType) (doc *Document, newRev string, err error) { err = validateAPIDocUpdate(body) if err != nil { return nil, "", err @@ -1155,7 +1183,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context newDoc.UpdateBody(body) - doc, newRevID, putExistingRevErr := db.PutExistingRev(ctx, newDoc, docHistory, noConflicts, false, nil) + doc, newRevID, putExistingRevErr := db.PutExistingRev(ctx, newDoc, docHistory, noConflicts, false, nil, docUpdateEvent) if putExistingRevErr != nil { return nil, "", putExistingRevErr @@ -1831,7 +1859,7 @@ type updateAndReturnDocCallback func(*Document) (resultDoc *Document, resultAtta // 1. Receive the updated document body in the response // 2. Specify the existing document body/xattr/cas, to avoid initial retrieval of the doc in cases that the current contents are already known (e.g. import). // On cas failure, the document will still be reloaded from the bucket as usual. 
-func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, docid string, allowImport bool, expiry uint32, opts *sgbucket.MutateInOptions, existingDoc *sgbucket.BucketDocument, callback updateAndReturnDocCallback) (doc *Document, newRevID string, err error) { +func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, docid string, allowImport bool, expiry uint32, opts *sgbucket.MutateInOptions, docUpdateEvent DocUpdateType, existingDoc *sgbucket.BucketDocument, callback updateAndReturnDocCallback) (doc *Document, newRevID string, err error) { key := realDocID(docid) if key == "" { @@ -1930,6 +1958,14 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do return } + // update the HLV values + doc, err = db.updateHLV(doc, docUpdateEvent) + if err != nil { + return + } + // update the mutate in options based on the above logic + updatedSpec = doc.SyncData.HLV.computeMacroExpansions() + deleteDoc = currentRevFromHistory.Deleted // Return the new raw document value for the bucket to store. 
@@ -1950,7 +1986,7 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do // Prior to saving doc, remove the revision in cache if createNewRevIDSkipped { - db.revisionCache.Remove(doc.ID, doc.CurrentRev) + db.revisionCache.RemoveWithRev(doc.ID, doc.CurrentRev) } base.DebugfCtx(ctx, base.KeyCRUD, "Saving doc (seq: #%d, id: %v rev: %v)", doc.Sequence, base.UD(doc.ID), doc.CurrentRev) @@ -1964,6 +2000,8 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do } } else if doc != nil { doc.Cas = casOut + // update the doc's HLV defined post macro expansion + doc = postWriteUpdateHLV(doc, casOut) } } @@ -2021,6 +2059,7 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do Expiry: doc.Expiry, Deleted: doc.History[newRevID].Deleted, _shallowCopyBody: storedDoc.Body(ctx), + CV: &CurrentVersionVector{VersionCAS: doc.HLV.Version, SourceID: doc.HLV.SourceID}, } if createNewRevIDSkipped { @@ -2083,6 +2122,19 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do return doc, newRevID, nil } +func postWriteUpdateHLV(doc *Document, casOut uint64) *Document { + if doc.HLV == nil { + return doc + } + if doc.HLV.Version == hlvExpandMacroCASValue { + doc.HLV.Version = casOut + } + if doc.HLV.CurrentVersionCAS == hlvExpandMacroCASValue { + doc.HLV.CurrentVersionCAS = casOut + } + return doc +} + func getAttachmentIDsForLeafRevisions(ctx context.Context, db *DatabaseCollectionWithUser, doc *Document, newRevID string) (map[string]struct{}, error) { leafAttachments := make(map[string]struct{}) @@ -2579,8 +2631,10 @@ func (db *DatabaseCollectionWithUser) CheckProposedRev(ctx context.Context, doci } const ( - xattrMacroCas = "cas" - xattrMacroValueCrc32c = "value_crc32c" + xattrMacroCas = "cas" + xattrMacroValueCrc32c = "value_crc32c" + versionVectorVrsMacro = "_vv.vrs" + versionVectorCVCASMacro = "_vv.cvCas" ) func macroExpandSpec(xattrName string) 
[]sgbucket.MacroExpansionSpec { @@ -2599,3 +2653,11 @@ func xattrCasPath(xattrKey string) string { func xattrCrc32cPath(xattrKey string) string { return xattrKey + "." + xattrMacroValueCrc32c } + +func xattrCurrentVersionPath(xattrKey string) string { + return xattrKey + "." + versionVectorVrsMacro +} + +func xattrCurrentVersionCASPath(xattrKey string) string { + return xattrKey + "." + versionVectorCVCASMacro +} diff --git a/db/crud_test.go b/db/crud_test.go index ea8f99f355..38ca667b3d 100644 --- a/db/crud_test.go +++ b/db/crud_test.go @@ -75,7 +75,7 @@ func TestRevisionCacheLoad(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Flush the cache @@ -116,7 +116,7 @@ func TestHasAttachmentsFlag(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -127,7 +127,7 @@ func TestHasAttachmentsFlag(t *testing.T) { rev2a_body := unjson(`{"_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -153,7 +153,7 @@ func 
TestHasAttachmentsFlag(t *testing.T) { rev2b_body := unjson(`{"_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -251,7 +251,7 @@ func TestHasAttachmentsFlagForLegacyAttachments(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create rev 2-a with legacy attachment. 
@@ -280,7 +280,7 @@ func TestHasAttachmentsFlagForLegacyAttachments(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -315,7 +315,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -326,7 +326,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2a_body := Body{} rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -345,7 +345,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ 
-388,7 +388,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3b_body := Body{} rev3b_body["version"] = "3b" rev3b_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) rev3b_body[BodyId] = doc.ID rev3b_body[BodyRev] = newRev rev3b_body[BodyDeleted] = true @@ -425,7 +425,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2c_body := Body{} rev2c_body["key1"] = prop_1000_bytes rev2c_body["version"] = "2c" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"2-c", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2c_body[BodyId] = doc.ID rev2c_body[BodyRev] = newRev assert.NoError(t, err, "add 2-c") @@ -447,7 +447,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3c_body["version"] = "3c" rev3c_body["key1"] = prop_1000_bytes rev3c_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-c"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-c"}, false, ExistingVersionWithUpdateToHLV) rev3c_body[BodyId] = doc.ID rev3c_body[BodyRev] = newRev rev3c_body[BodyDeleted] = true @@ -476,7 +476,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3a_body := Body{} rev3a_body["key1"] = prop_1000_bytes rev3a_body["version"] = "3a" - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"3-a", "2-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-a") revTree, err = 
getRevTreeList(ctx, collection.dataStore, "doc1", db.UseXattrs()) @@ -499,7 +499,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { // Create rev 2-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -510,7 +510,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev2a_body := Body{} rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -529,7 +529,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -574,7 +574,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev3b_body["version"] = "3b" rev3b_body["key1"] = prop_1000_bytes rev3b_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) rev3b_body[BodyId] = doc.ID rev3b_body[BodyRev] = 
newRev rev3b_body[BodyDeleted] = true @@ -609,17 +609,17 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { activeRevBody := Body{} activeRevBody["version"] = "...a" activeRevBody["key1"] = prop_1000_bytes - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"3-a", "2-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"4-a", "3-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 4-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"5-a", "4-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"5-a", "4-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 5-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"6-a", "5-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"6-a", "5-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 6-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"7-a", "6-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"7-a", "6-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 7-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"8-a", "7-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"8-a", "7-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 8-a") // Verify that 3-b is still present at this point @@ -628,7 +628,7 @@ func 
TestRevisionStoragePruneTombstone(t *testing.T) { assert.NoError(t, err, "Rev 3-b should still exist") // Add one more rev that triggers pruning since gen(9-3) > revsLimit - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"9-a", "8-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"9-a", "8-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 9-a") // Verify that 3-b has been pruned @@ -657,7 +657,7 @@ func TestOldRevisionStorage(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a", "large": prop_1000_bytes} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 1-a") // Create rev 2-a @@ -666,7 +666,7 @@ func TestOldRevisionStorage(t *testing.T) { // 2-a log.Printf("Create rev 2-a") rev2a_body := Body{"key1": "value2", "version": "2a", "large": prop_1000_bytes} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev @@ -686,7 +686,7 @@ func TestOldRevisionStorage(t *testing.T) { // 3-a log.Printf("Create rev 3-a") rev3a_body := Body{"key1": "value2", "version": "3a", "large": prop_1000_bytes} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 3-a") rev3a_body[BodyId] = doc.ID rev3a_body[BodyRev] 
= newRev @@ -705,7 +705,7 @@ func TestOldRevisionStorage(t *testing.T) { // 3-a log.Printf("Create rev 2-b") rev2b_body := Body{"key1": "value2", "version": "2b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 2-b") // Retrieve the document: @@ -728,7 +728,7 @@ func TestOldRevisionStorage(t *testing.T) { // 6-a log.Printf("Create rev 6-a") rev6a_body := Body{"key1": "value2", "version": "6a", "large": prop_1000_bytes} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 6-a") rev6a_body[BodyId] = doc.ID rev6a_body[BodyRev] = newRev @@ -753,7 +753,7 @@ func TestOldRevisionStorage(t *testing.T) { // 6-a log.Printf("Create rev 3-b") rev3b_body := Body{"key1": "value2", "version": "3b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 3-b") // Same again and again @@ -772,12 +772,12 @@ func TestOldRevisionStorage(t *testing.T) { log.Printf("Create rev 3-c") rev3c_body := Body{"key1": "value2", "version": "3c", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 
3-c") log.Printf("Create rev 3-d") rev3d_body := Body{"key1": "value2", "version": "3d", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3d_body, []string{"3-d", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3d_body, []string{"3-d", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 3-d") // Create new winning revision on 'b' branch. Triggers movement of 6-a to inline storage. Force cas retry, check document contents @@ -796,7 +796,7 @@ func TestOldRevisionStorage(t *testing.T) { // 7-b log.Printf("Create rev 7-b") rev7b_body := Body{"key1": "value2", "version": "7b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev7b_body, []string{"7-b", "6-b", "5-b", "4-b", "3-b"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev7b_body, []string{"7-b", "6-b", "5-b", "4-b", "3-b"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "add 7-b") } @@ -817,7 +817,7 @@ func TestOldRevisionStorageError(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "v": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -826,7 +826,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 2-a log.Printf("Create rev 2-a") rev2a_body := Body{"key1": "value2", "v": "2a"} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -845,7 +845,7 @@ func 
TestOldRevisionStorageError(t *testing.T) { // 3-a log.Printf("Create rev 3-a") rev3a_body := Body{"key1": "value2", "v": "3a"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev3a_body[BodyId] = doc.ID rev3a_body[BodyRev] = newRev assert.NoError(t, err, "add 3-a") @@ -858,7 +858,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 3-a log.Printf("Create rev 2-b") rev2b_body := Body{"key1": "value2", "v": "2b"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -883,7 +883,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 6-a log.Printf("Create rev 6-a") rev6a_body := Body{"key1": "value2", "v": "6a"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) rev6a_body[BodyId] = doc.ID rev6a_body[BodyRev] = newRev assert.NoError(t, err, "add 6-a") @@ -909,7 +909,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 6-a log.Printf("Create rev 3-b") rev3b_body := Body{"key1": "value2", "v": "3b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-b") // Same again @@ -929,7 +929,7 @@ func 
TestOldRevisionStorageError(t *testing.T) { log.Printf("Create rev 3-c") rev3c_body := Body{"key1": "value2", "v": "3c"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-c") } @@ -946,7 +946,7 @@ func TestLargeSequence(t *testing.T) { // Write a doc via SG body := Body{"key1": "largeSeqTest"} - _, _, err := collection.PutExistingRevWithBody(ctx, "largeSeqDoc", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "largeSeqDoc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add largeSeqDoc") syncData, err := collection.GetDocSyncData(ctx, "largeSeqDoc") @@ -1021,7 +1021,7 @@ func TestMalformedRevisionStorageRecovery(t *testing.T) { // 6-a log.Printf("Attempt to create rev 3-c") rev3c_body := Body{"key1": "value2", "v": "3c"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-c") } @@ -1033,16 +1033,16 @@ func BenchmarkDatabaseGet1xRev(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar", "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) largeDoc := make([]byte, 1000000) longBody := Body{"val": string(largeDoc), "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, []string{"1-a"}, false, 
ExistingVersionWithUpdateToHLV) var shortWithAttachmentsDataBody Body shortWithAttachmentsData := `{"test": true, "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}, "rev":"1-a"}` _ = base.JSONUnmarshal([]byte(shortWithAttachmentsData), &shortWithAttachmentsDataBody) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) b.Run("ShortLatest", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1061,9 +1061,9 @@ func BenchmarkDatabaseGet1xRev(b *testing.B) { }) updateBody := Body{"rev": "2-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) b.Run("ShortOld", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1090,16 +1090,16 @@ func BenchmarkDatabaseGetRev(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar", "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) largeDoc := make([]byte, 1000000) longBody := Body{"val": string(largeDoc), "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, 
"doc2", longBody, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) var shortWithAttachmentsDataBody Body shortWithAttachmentsData := `{"test": true, "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}, "rev":"1-a"}` _ = base.JSONUnmarshal([]byte(shortWithAttachmentsData), &shortWithAttachmentsDataBody) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) b.Run("ShortLatest", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1118,9 +1118,9 @@ func BenchmarkDatabaseGetRev(b *testing.B) { }) updateBody := Body{"rev": "2-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) b.Run("ShortOld", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1148,7 +1148,7 @@ func BenchmarkHandleRevDelta(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) getDelta := func(newDoc 
*Document) { deltaSrcRev, _ := collection.GetRev(ctx, "doc1", "1-a", false, nil) @@ -1197,18 +1197,18 @@ func TestGetAvailableRevAttachments(t *testing.T) { // Create the very first revision of the document with attachment; let's call this as rev 1-a payload := `{"sku":"6213100","_attachments":{"camera.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - _, rev, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"1-a"}, false) + _, rev, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") ancestor := rev // Ancestor revision // Create the second revision of the document with attachment reference; payload = `{"sku":"6213101","_attachments":{"camera.txt":{"stub":true,"revpos":1}}}` - _, rev, err = collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"2-a", "1-a"}, false) + _, rev, err = collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) parent := rev // Immediate ancestor or parent revision assert.NoError(t, err, "Couldn't create document") payload = `{"sku":"6213102","_attachments":{"camera.txt":{"stub":true,"revpos":1}}}` - doc, _, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"3-a", "2-a"}, false) + doc, _, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") // Get available attachments by immediate ancestor revision or parent revision @@ -1235,11 +1235,11 @@ func TestGet1xRevAndChannels(t *testing.T) { docId := "dd6d2dcc679d12b9430a9787bab45b33" payload := `{"sku":"6213100","_attachments":{"camera.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - doc1, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, 
false) + doc1, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") payload = `{"sku":"6213101","_attachments":{"lens.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - doc2, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false) + doc2, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") // Get the 1x revision from document with list revision enabled @@ -1298,7 +1298,7 @@ func TestGet1xRevFromDoc(t *testing.T) { // Create the first revision of the document docId := "356779a9a1696714480f57fa3fb66d4c" payload := `{"city":"Los Angeles"}` - doc, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false) + doc, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") assert.NotEmpty(t, doc, "Document shouldn't be empty") assert.Equal(t, "1-a", rev1, "Provided input revision ID should be returned") @@ -1321,7 +1321,7 @@ func TestGet1xRevFromDoc(t *testing.T) { // Create the second revision of the document payload = `{"city":"Hollywood"}` - doc, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false) + doc, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't create document") assert.NotEmpty(t, doc, "Document shouldn't be empty") assert.Equal(t, "2-a", rev2, "Provided input revision ID should be returned") diff --git a/db/database.go b/db/database.go index 7c1909a547..036369f12b 100644 --- a/db/database.go +++ b/db/database.go 
@@ -48,6 +48,15 @@ const ( DBCompactRunning ) +const ( + Import DocUpdateType = iota + NewVersion + ExistingVersion + ExistingVersionWithUpdateToHLV +) + +type DocUpdateType uint32 + const ( DefaultRevsLimitNoConflicts = 50 DefaultRevsLimitConflicts = 100 @@ -88,6 +97,7 @@ type DatabaseContext struct { MetadataStore base.DataStore // Storage for database metadata (anything that isn't an end-user's/customer's documents) Bucket base.Bucket // Storage BucketSpec base.BucketSpec // The BucketSpec + BucketUUID string // The bucket UUID for the bucket the database is created against BucketLock sync.RWMutex // Control Access to the underlying bucket object mutationListener changeListener // Caching feed listener ImportListener *importListener // Import feed listener @@ -396,6 +406,11 @@ func NewDatabaseContext(ctx context.Context, dbName string, bucket base.Bucket, metadataStore = bucket.DefaultDataStore() } + bucketUUID, err := bucket.UUID() + if err != nil { + return nil, err + } + // Register the cbgt pindex type for the configGroup RegisterImportPindexImpl(ctx, options.GroupID) @@ -404,6 +419,7 @@ func NewDatabaseContext(ctx context.Context, dbName string, bucket base.Bucket, UUID: cbgt.NewUUID(), MetadataStore: metadataStore, Bucket: bucket, + BucketUUID: bucketUUID, StartTime: time.Now(), autoImport: autoImport, Options: options, diff --git a/db/database_test.go b/db/database_test.go index f34123e791..f60d87d24b 100644 --- a/db/database_test.go +++ b/db/database_test.go @@ -294,7 +294,7 @@ func TestDatabase(t *testing.T) { body["key2"] = int64(4444) history := []string{"4-four", "3-three", "2-488724414d0ed6b398d6d2aeb228d797", "1-cb0c9a22be0e5a1b01084ec019defa81"} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", body, history, false) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", body, history, false, ExistingVersionWithUpdateToHLV) body[BodyId] = doc.ID body[BodyRev] = newRev assert.NoError(t, err, "PutExistingRev failed") 
@@ -1020,18 +1020,18 @@ func TestRepeatedConflict(t *testing.T) { // Create rev 1 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 body["channels"] = []string{"all", "2b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") body["n"] = 3 body["channels"] = []string{"all", "2a"} - _, newRev, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) + _, newRev, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") // Get the _rev that was set in the body by PutExistingRevWithBody() and make assertions on it @@ -1040,7 +1040,7 @@ func TestRepeatedConflict(t *testing.T) { // Remove the _rev key from the body, and call PutExistingRevWithBody() again, which should re-add it delete(body, BodyRev) - _, newRev, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) + _, newRev, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err) // The _rev should pass the same assertions as before, since PutExistingRevWithBody() should re-add it @@ -1068,7 +1068,7 @@ func TestConflicts(t *testing.T) { // Create rev 1 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, 
"doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Wait for rev to be cached @@ -1081,11 +1081,11 @@ func TestConflicts(t *testing.T) { // Create two conflicting changes: body["n"] = 2 body["channels"] = []string{"all", "2b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") body["n"] = 3 body["channels"] = []string{"all", "2a"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") cacheWaiter.Add(2) @@ -1213,55 +1213,55 @@ func TestNoConflictsMode(t *testing.T) { // Create revs 1 and 2 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") // Try to create a conflict branching from rev 1: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assertHTTPError(t, err, 409) // Try to create a conflict with no common ancestor: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-c", "1-c"}, 
false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-c", "1-c"}, false, ExistingVersionWithUpdateToHLV) assertHTTPError(t, err, 409) // Try to create a conflict with a longer history: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-d", "3-d", "2-d", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-d", "3-d", "2-d", "1-a"}, false, ExistingVersionWithUpdateToHLV) assertHTTPError(t, err, 409) // Try to create a conflict with no history: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-e"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-e"}, false, ExistingVersionWithUpdateToHLV) assertHTTPError(t, err, 409) // Create a non-conflict with a longer history, ending in a deletion: body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 4-a") delete(body, BodyDeleted) // Try to resurrect the document with a conflicting branch - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-f", "3-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-f", "3-a"}, false, ExistingVersionWithUpdateToHLV) assertHTTPError(t, err, 409) // Resurrect the tombstoned document with a disconnected branch): - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-f"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-f"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-f") // Tombstone the resurrected branch body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-f", "1-f"}, false) + _, _, err = 
collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-f", "1-f"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-f") delete(body, BodyDeleted) // Resurrect the tombstoned document with a valid history (descendents of leaf) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-f", "4-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-f", "4-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 5-f") delete(body, BodyDeleted) // Create a new document with a longer history: - _, _, err = collection.PutExistingRevWithBody(ctx, "COD", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "COD", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add COD") delete(body, BodyDeleted) @@ -1289,34 +1289,34 @@ func TestAllowConflictsFalseTombstoneExistingConflict(t *testing.T) { // Create documents with multiple non-deleted branches log.Printf("Creating docs") body := Body{"n": 1} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) + _, 
_, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") body["n"] = 3 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") // Set AllowConflicts to false db.Options.AllowConflicts = base.BoolPtr(false) // Attempt to tombstone a non-leaf node of a conflicted document - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.True(t, err != nil, "expected error tombstoning non-leaf") // Tombstone the non-winning branch of a conflicted document @@ -1366,27 
+1366,27 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Create documents with multiple non-deleted branches log.Printf("Creating docs") body := Body{"n": 1} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-b") body["n"] = 3 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, 
ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 2-a") // Set AllowConflicts to false @@ -1395,12 +1395,12 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Attempt to tombstone a non-leaf node of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.True(t, err != nil, "expected error tombstoning non-leaf") // Tombstone the non-winning branch of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-a (tombstone)") doc, err := collection.GetDocument(ctx, "doc1", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1408,7 +1408,7 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Tombstone the winning branch of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"3-b", "2-b"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"3-b", "2-b"}, false, 
ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-b (tombstone)") doc, err = collection.GetDocument(ctx, "doc2", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1417,7 +1417,7 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Set revs_limit=1, then tombstone non-winning branch of a conflicted document. Validate retrieval still works. body[BodyDeleted] = true db.RevsLimit = uint32(1) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"3-a", "2-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "add 3-a (tombstone)") doc, err = collection.GetDocument(ctx, "doc3", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1453,7 +1453,7 @@ func TestSyncFnOnPush(t *testing.T) { body["channels"] = "clibup" history := []string{"4-four", "3-three", "2-488724414d0ed6b398d6d2aeb228d797", rev1id} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, history, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, history, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "PutExistingRev failed") // Check that the doc has the correct channel (test for issue #300) @@ -2145,7 +2145,7 @@ func TestConcurrentPushSameNewNonWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Emily", "age": 20} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 3-b") } } @@ -2160,29 +2160,29 @@ func TestConcurrentPushSameNewNonWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := 
Body{"name": "Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Charlie", "age": 10} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 4-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-b") enableCallback = true body = Body{"name": "Emily", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 3-b") doc, err := collection.GetDocument(ctx, "doc1", 
DocUnmarshalAll) @@ -2203,7 +2203,7 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Charlie", "age": 10, BodyDeleted: true} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't add revision 4-a (tombstone)") } } @@ -2218,19 +2218,19 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := Body{"name": "Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-b") doc, err := 
collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2240,7 +2240,7 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { enableCallback = true body = Body{"name": "Charlie", "age": 10, BodyDeleted: true} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't add revision 4-a (tombstone)") doc, err = collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2261,7 +2261,7 @@ func TestConcurrentPushDifferentUpdateNonWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Joshua", "age": 11} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b1", "2-b", "1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b1", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't add revision 3-b1") } } @@ -2276,29 +2276,29 @@ func TestConcurrentPushDifferentUpdateNonWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := Body{"name": "Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, 
"doc1", body, []string{"3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Charlie", "age": 10} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 4-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Adding revision 2-b") enableCallback = true body = Body{"name": "Liam", "age": 12} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b2", "2-b", "1-a"}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b2", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Couldn't add revision 3-b2") doc, err := collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2332,7 +2332,7 @@ func TestIncreasingRecentSequences(t *testing.T) { enableCallback = false // Write a doc collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-abc", revid}, true) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-abc", revid}, true, ExistingVersionWithUpdateToHLV) assert.NoError(t, err) } } @@ -2349,7 +2349,7 @@ func TestIncreasingRecentSequences(t *testing.T) { assert.NoError(t, err) enableCallback = true - doc, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-abc", "2-abc", revid}, true) + doc, _, err := 
collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-abc", "2-abc", revid}, true, ExistingVersionWithUpdateToHLV) assert.NoError(t, err) assert.True(t, sort.IsSorted(base.SortedUint64Slice(doc.SyncData.RecentSequences))) @@ -2797,72 +2797,62 @@ func Test_invalidateAllPrincipalsCache(t *testing.T) { } func Test_resyncDocument(t *testing.T) { - testCases := []struct { - useXattr bool - }{ - {useXattr: true}, - {useXattr: false}, + if !base.TestUseXattrs() { + t.Skip("Walrus doesn't support xattr") } + db, ctx := setupTestDB(t) + defer db.Close(ctx) - for _, testCase := range testCases { - t.Run(fmt.Sprintf("Test_resyncDocument with useXattr: %t", testCase.useXattr), func(t *testing.T) { - if !base.TestUseXattrs() && testCase.useXattr { - t.Skip("Don't run xattr tests on non xattr tests") - } - db, ctx := setupTestDB(t) - defer db.Close(ctx) - - db.Options.EnableXattr = testCase.useXattr - db.Options.QueryPaginationLimit = 100 - collection := GetSingleDatabaseCollectionWithUser(t, db) + db.Options.EnableXattr = true + db.Options.QueryPaginationLimit = 100 + collection := GetSingleDatabaseCollectionWithUser(t, db) - syncFn := ` + syncFn := ` function sync(doc, oldDoc){ channel("channel." + "ABC"); } ` - _, err := collection.UpdateSyncFun(ctx, syncFn) - require.NoError(t, err) + _, err := collection.UpdateSyncFun(ctx, syncFn) + require.NoError(t, err) - docID := uuid.NewString() + docID := uuid.NewString() - updateBody := make(map[string]interface{}) - updateBody["val"] = "value" - _, doc, err := collection.Put(ctx, docID, updateBody) - require.NoError(t, err) - assert.NotNil(t, doc) + updateBody := make(map[string]interface{}) + updateBody["val"] = "value" + _, doc, err := collection.Put(ctx, docID, updateBody) + require.NoError(t, err) + assert.NotNil(t, doc) - syncFn = ` + syncFn = ` function sync(doc, oldDoc){ channel("channel." 
+ "ABC12332423234"); } ` - _, err = collection.UpdateSyncFun(ctx, syncFn) - require.NoError(t, err) - - _, _, err = collection.resyncDocument(ctx, docID, realDocID(docID), false, []uint64{10}) - require.NoError(t, err) - err = collection.WaitForPendingChanges(ctx) - require.NoError(t, err) + _, err = collection.UpdateSyncFun(ctx, syncFn) + require.NoError(t, err) - syncData, err := collection.GetDocSyncData(ctx, docID) - assert.NoError(t, err) + _, _, err = collection.resyncDocument(ctx, docID, realDocID(docID), false, []uint64{10}) + require.NoError(t, err) + err = collection.WaitForPendingChanges(ctx) + require.NoError(t, err) - assert.Len(t, syncData.ChannelSet, 2) - assert.Len(t, syncData.Channels, 2) - found := false + syncData, err := collection.GetDocSyncData(ctx, docID) + assert.NoError(t, err) - for _, chSet := range syncData.ChannelSet { - if chSet.Name == "channel.ABC12332423234" { - found = true - break - } - } + assert.Len(t, syncData.ChannelSet, 2) + assert.Len(t, syncData.Channels, 2) + found := false - assert.True(t, found) - assert.Equal(t, 2, int(db.DbStats.Database().SyncFunctionCount.Value())) - }) + for _, chSet := range syncData.ChannelSet { + if chSet.Name == "channel.ABC12332423234" { + found = true + break + } } + + assert.True(t, found) + assert.Equal(t, 2, int(db.DbStats.Database().SyncFunctionCount.Value())) + } func Test_getUpdatedDocument(t *testing.T) { diff --git a/db/document.go b/db/document.go index 83d67f5389..94fc62ad48 100644 --- a/db/document.go +++ b/db/document.go @@ -41,6 +41,7 @@ const ( DocUnmarshalHistory // Unmarshals history + rev + CAS only DocUnmarshalRev // Unmarshals rev + CAS only DocUnmarshalCAS // Unmarshals CAS (for import check) only + DocUnmarshalVV // Unmarshals Version Vector only DocUnmarshalNone // No unmarshalling (skips import/upgrade check) ) @@ -64,23 +65,24 @@ type ChannelSetEntry struct { // The sync-gateway metadata stored in the "_sync" property of a Couchbase document. 
type SyncData struct { - CurrentRev string `json:"rev"` - NewestRev string `json:"new_rev,omitempty"` // Newest rev, if different from CurrentRev - Flags uint8 `json:"flags,omitempty"` - Sequence uint64 `json:"sequence,omitempty"` - UnusedSequences []uint64 `json:"unused_sequences,omitempty"` // unused sequences due to update conflicts/CAS retry - RecentSequences []uint64 `json:"recent_sequences,omitempty"` // recent sequences for this doc - used in server dedup handling - Channels channels.ChannelMap `json:"channels,omitempty"` - Access UserAccessMap `json:"access,omitempty"` - RoleAccess UserAccessMap `json:"role_access,omitempty"` - Expiry *time.Time `json:"exp,omitempty"` // Document expiry. Information only - actual expiry/delete handling is done by bucket storage. Needs to be pointer for omitempty to work (see https://github.com/golang/go/issues/4357) - Cas string `json:"cas"` // String representation of a cas value, populated via macro expansion - Crc32c string `json:"value_crc32c"` // String representation of crc32c hash of doc body, populated via macro expansion - Crc32cUserXattr string `json:"user_xattr_value_crc32c,omitempty"` // String representation of crc32c hash of user xattr - TombstonedAt int64 `json:"tombstoned_at,omitempty"` // Time the document was tombstoned. 
Used for view compaction - Attachments AttachmentsMeta `json:"attachments,omitempty"` - ChannelSet []ChannelSetEntry `json:"channel_set"` - ChannelSetHistory []ChannelSetEntry `json:"channel_set_history"` + CurrentRev string `json:"rev"` + NewestRev string `json:"new_rev,omitempty"` // Newest rev, if different from CurrentRev + Flags uint8 `json:"flags,omitempty"` + Sequence uint64 `json:"sequence,omitempty"` + UnusedSequences []uint64 `json:"unused_sequences,omitempty"` // unused sequences due to update conflicts/CAS retry + RecentSequences []uint64 `json:"recent_sequences,omitempty"` // recent sequences for this doc - used in server dedup handling + Channels channels.ChannelMap `json:"channels,omitempty"` + Access UserAccessMap `json:"access,omitempty"` + RoleAccess UserAccessMap `json:"role_access,omitempty"` + Expiry *time.Time `json:"exp,omitempty"` // Document expiry. Information only - actual expiry/delete handling is done by bucket storage. Needs to be pointer for omitempty to work (see https://github.com/golang/go/issues/4357) + Cas string `json:"cas"` // String representation of a cas value, populated via macro expansion + Crc32c string `json:"value_crc32c"` // String representation of crc32c hash of doc body, populated via macro expansion + Crc32cUserXattr string `json:"user_xattr_value_crc32c,omitempty"` // String representation of crc32c hash of user xattr + TombstonedAt int64 `json:"tombstoned_at,omitempty"` // Time the document was tombstoned. Used for view compaction + Attachments AttachmentsMeta `json:"attachments,omitempty"` + ChannelSet []ChannelSetEntry `json:"channel_set"` + ChannelSetHistory []ChannelSetEntry `json:"channel_set_history"` + HLV *HybridLogicalVector `json:"_vv,omitempty"` // Only used for performance metrics: TimeSaved time.Time `json:"time_saved,omitempty"` // Timestamp of save. 
@@ -175,11 +177,12 @@ type Document struct { Cas uint64 // Document cas rawUserXattr []byte // Raw user xattr as retrieved from the bucket - Deleted bool - DocExpiry uint32 - RevID string - DocAttachments AttachmentsMeta - inlineSyncData bool + Deleted bool + DocExpiry uint32 + RevID string + DocAttachments AttachmentsMeta + inlineSyncData bool + currentRevChannels base.Set // A base.Set of the current revision's channels (determined by SyncData.Channels at UnmarshalJSON time) } type historyOnlySyncData struct { @@ -967,6 +970,7 @@ func (doc *Document) updateChannels(ctx context.Context, newChannels base.Set) ( doc.updateChannelHistory(channel, doc.Sequence, true) } } + doc.currentRevChannels = newChannels if changed != nil { base.InfofCtx(ctx, base.KeyCRUD, "\tDoc %q / %q in channels %q", base.UD(doc.ID), doc.CurrentRev, base.UD(newChannels)) changedChannels, err = channels.SetFromArray(changed, channels.KeepStar) @@ -1076,6 +1080,17 @@ func (doc *Document) UnmarshalJSON(data []byte) error { doc.SyncData = *syncData.SyncData } + // determine current revision's channels and store in-memory (avoids doc.Channels iteration at access-check time) + if len(doc.Channels) > 0 { + ch := base.SetOf() + for channelName, channelRemoval := range doc.Channels { + if channelRemoval == nil || channelRemoval.Seq == 0 { + ch.Add(channelName) + } + } + doc.currentRevChannels = ch + } + // Unmarshal the rest of the doc body as map[string]interface{} if err := doc._body.Unmarshal(data); err != nil { return pkgerrors.WithStack(base.RedactErrorf("Failed to UnmarshalJSON() doc with id: %s. 
Error: %v", base.UD(doc.ID), err)) @@ -1130,7 +1145,6 @@ func (doc *Document) UnmarshalWithXattr(ctx context.Context, data []byte, xdata if unmarshalLevel == DocUnmarshalAll && len(data) > 0 { return doc._body.Unmarshal(data) } - case DocUnmarshalNoHistory: // Unmarshal sync metadata only, excluding history doc.SyncData = SyncData{} @@ -1174,6 +1188,14 @@ func (doc *Document) UnmarshalWithXattr(ctx context.Context, data []byte, xdata Cas: casOnlyMeta.Cas, } doc._rawBody = data + case DocUnmarshalVV: + tmpData := SyncData{} + unmarshalErr := base.JSONUnmarshal(xdata, &tmpData) + if unmarshalErr != nil { + return base.RedactErrorf("Failed to UnmarshalWithXattr() doc with id: %s (DocUnmarshalVV). Error: %w", base.UD(doc.ID), unmarshalErr) + } + doc.SyncData.HLV = tmpData.HLV + doc._rawBody = data } // If there's no body, but there is an xattr, set deleted flag and initialize an empty body @@ -1215,3 +1237,17 @@ func (doc *Document) MarshalWithXattr() (data []byte, xdata []byte, err error) { return data, xdata, nil } + +// HasCurrentVersion Compares the specified CV with the fetched documents CV, returns error on mismatch between the two +func (d *Document) HasCurrentVersion(cv CurrentVersionVector) error { + if d.HLV == nil { + return base.RedactErrorf("no HLV present in fetched doc %s", base.UD(d.ID)) + } + + // fetch the current version for the loaded doc and compare against the CV specified in the IDandCV key + fetchedDocSource, fetchedDocVersion := d.HLV.GetCurrentVersion() + if fetchedDocSource != cv.SourceID || fetchedDocVersion != cv.VersionCAS { + return base.RedactErrorf("mismatch between specified current version and fetched document current version for doc %s", base.UD(d.ID)) + } + return nil +} diff --git a/db/document_test.go b/db/document_test.go index 16fbd97ff4..6301e99ec3 100644 --- a/db/document_test.go +++ b/db/document_test.go @@ -14,6 +14,7 @@ import ( "bytes" "encoding/binary" "log" + "reflect" "testing" "github.com/couchbase/sync_gateway/base" 
@@ -190,6 +191,106 @@ func BenchmarkUnmarshalBody(b *testing.B) { } } +const doc_meta_with_vv = `{ + "rev": "3-89758294abc63157354c2b08547c2d21", + "sequence": 7, + "recent_sequences": [ + 5, + 6, + 7 + ], + "history": { + "revs": [ + "1-fc591a068c153d6c3d26023d0d93dcc1", + "2-0eab03571bc55510c8fc4bfac9fe4412", + "3-89758294abc63157354c2b08547c2d21" + ], + "parents": [ + -1, + 0, + 1 + ], + "channels": [ + [ + "ABC", + "DEF" + ], + [ + "ABC", + "DEF", + "GHI" + ], + [ + "ABC", + "GHI" + ] + ] + }, + "channels": { + "ABC": null, + "DEF": { + "seq": 7, + "rev": "3-89758294abc63157354c2b08547c2d21" + }, + "GHI": null + }, + "_vv":{ + "cvCas":"0x40e2010000000000", + "src":"cb06dc003846116d9b66d2ab23887a96", + "vrs":"0x40e2010000000000", + "mv":{ + "s_LhRPsa7CpjEvP5zeXTXEBA":"c0ff05d7ac059a16", + "s_NqiIe0LekFPLeX4JvTO6Iw":"1c008cd6ac059a16" + }, + "pv":{ + "s_YZvBpEaztom9z5V/hDoeIw":"f0ff44d6ac059a16" + } + }, + "cas": "", + "time_saved": "2017-10-25T12:45:29.622450174-07:00" + }` + +func TestParseVersionVectorSyncData(t *testing.T) { + mv := make(map[string]uint64) + pv := make(map[string]uint64) + mv["s_LhRPsa7CpjEvP5zeXTXEBA"] = 1628620455147864000 + mv["s_NqiIe0LekFPLeX4JvTO6Iw"] = 1628620455139868700 + pv["s_YZvBpEaztom9z5V/hDoeIw"] = 1628620455135215600 + + ctx := base.TestCtx(t) + + doc_meta := []byte(doc_meta_with_vv) + doc, err := unmarshalDocumentWithXattr(ctx, "doc_1k", nil, doc_meta, nil, 1, DocUnmarshalVV) + require.NoError(t, err) + + // assert on doc version vector values + assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) + assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) + assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) + assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) + assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) + + doc, err = unmarshalDocumentWithXattr(ctx, "doc1", nil, doc_meta, nil, 1, DocUnmarshalAll) + require.NoError(t, err) + + // 
assert on doc version vector values + assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) + assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) + assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) + assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) + assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) + + doc, err = unmarshalDocumentWithXattr(ctx, "doc1", nil, doc_meta, nil, 1, DocUnmarshalNoHistory) + require.NoError(t, err) + + // assert on doc version vector values + assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) + assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) + assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) + assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) + assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) +} + func TestParseXattr(t *testing.T) { zeroByte := byte(0) // Build payload for single xattr pair and body diff --git a/db/hybrid_logical_vector.go b/db/hybrid_logical_vector.go index 686ed33575..433e4bbd2c 100644 --- a/db/hybrid_logical_vector.go +++ b/db/hybrid_logical_vector.go @@ -10,10 +10,15 @@ package db import ( "fmt" + "math" + sgbucket "github.com/couchbase/sg-bucket" "github.com/couchbase/sync_gateway/base" ) +// hlvExpandMacroCASValue causes the field to be populated by CAS value by macro expansion +const hlvExpandMacroCASValue = math.MaxUint64 + type HybridLogicalVector struct { CurrentVersionCAS uint64 // current version cas (or cvCAS) stores the current CAS at the time of replication SourceID string // source bucket uuid of where this entry originated from @@ -36,10 +41,6 @@ type PersistedHybridLogicalVector struct { PreviousVersions map[string]string `json:"pv,omitempty"` } -type PersistedVersionVector struct { - PersistedHybridLogicalVector `json:"_vv"` -} - // NewHybridLogicalVector returns a HybridLogicalVector struct with maps initialised 
in the struct func NewHybridLogicalVector() HybridLogicalVector { return HybridLogicalVector{ @@ -67,7 +68,13 @@ func (hlv *HybridLogicalVector) IsInConflict(otherVector HybridLogicalVector) bo // previous versions on the HLV if needed func (hlv *HybridLogicalVector) AddVersion(newVersion CurrentVersionVector) error { if newVersion.VersionCAS < hlv.Version { - return fmt.Errorf("attempting to add new verison vector entry with a CAS that is less than the current version CAS value") + return fmt.Errorf("attempting to add new verison vector entry with a CAS that is less than the current version CAS value. Current cas: %d new cas %d", hlv.Version, newVersion.VersionCAS) + } + // check if this is the first time we're adding a source - version pair + if hlv.SourceID == "" { + hlv.Version = newVersion.VersionCAS + hlv.SourceID = newVersion.SourceID + return nil } // if new entry has the same source we simple just update the version if newVersion.SourceID == hlv.SourceID { @@ -75,6 +82,9 @@ func (hlv *HybridLogicalVector) AddVersion(newVersion CurrentVersionVector) erro return nil } // if we get here this is a new version from a different sourceID thus need to move current sourceID to previous versions and update current version + if hlv.PreviousVersions == nil { + hlv.PreviousVersions = make(map[string]uint64) + } hlv.PreviousVersions[hlv.SourceID] = hlv.Version hlv.Version = newVersion.VersionCAS hlv.SourceID = newVersion.SourceID @@ -170,7 +180,7 @@ func (hlv *HybridLogicalVector) GetVersion(sourceID string) uint64 { return latestVersion } -func (hlv *HybridLogicalVector) MarshalJSON() ([]byte, error) { +func (hlv HybridLogicalVector) MarshalJSON() ([]byte, error) { persistedHLV, err := hlv.convertHLVToPersistedFormat() if err != nil { @@ -181,7 +191,7 @@ func (hlv *HybridLogicalVector) MarshalJSON() ([]byte, error) { } func (hlv *HybridLogicalVector) UnmarshalJSON(inputjson []byte) error { - persistedJSON := PersistedVersionVector{} + persistedJSON := 
PersistedHybridLogicalVector{} err := base.JSONUnmarshal(inputjson, &persistedJSON) if err != nil { return err @@ -191,13 +201,16 @@ func (hlv *HybridLogicalVector) UnmarshalJSON(inputjson []byte) error { return nil } -func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedVersionVector, error) { - persistedHLV := PersistedVersionVector{} +func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedHybridLogicalVector, error) { + persistedHLV := PersistedHybridLogicalVector{} var cvCasByteArray []byte + var vrsCasByteArray []byte if hlv.CurrentVersionCAS != 0 { cvCasByteArray = base.Uint64CASToLittleEndianHex(hlv.CurrentVersionCAS) } - vrsCasByteArray := base.Uint64CASToLittleEndianHex(hlv.Version) + if hlv.Version != 0 { + vrsCasByteArray = base.Uint64CASToLittleEndianHex(hlv.Version) + } pvPersistedFormat, err := convertMapToPersistedFormat(hlv.PreviousVersions) if err != nil { @@ -216,7 +229,7 @@ func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedVersion return &persistedHLV, nil } -func (hlv *HybridLogicalVector) convertPersistedHLVToInMemoryHLV(persistedJSON PersistedVersionVector) { +func (hlv *HybridLogicalVector) convertPersistedHLVToInMemoryHLV(persistedJSON PersistedHybridLogicalVector) { hlv.CurrentVersionCAS = base.HexCasToUint64(persistedJSON.CurrentVersionCAS) hlv.SourceID = persistedJSON.SourceID // convert the hex cas to uint64 cas @@ -256,3 +269,17 @@ func convertMapToInMemoryFormat(persistedMap map[string]string) map[string]uint6 } return returnedMap } + +// computeMacroExpansions returns the mutate in spec needed for the document update based off the outcome in updateHLV +func (hlv *HybridLogicalVector) computeMacroExpansions() []sgbucket.MacroExpansionSpec { + var outputSpec []sgbucket.MacroExpansionSpec + if hlv.Version == hlvExpandMacroCASValue { + spec := sgbucket.NewMacroExpansionSpec(xattrCurrentVersionPath(base.SyncXattrName), sgbucket.MacroCas) + outputSpec = append(outputSpec, spec) + 
} + if hlv.CurrentVersionCAS == hlvExpandMacroCASValue { + spec := sgbucket.NewMacroExpansionSpec(xattrCurrentVersionCASPath(base.SyncXattrName), sgbucket.MacroCas) + outputSpec = append(outputSpec, spec) + } + return outputSpec +} diff --git a/db/import.go b/db/import.go index b578ab1fae..6db16664b9 100644 --- a/db/import.go +++ b/db/import.go @@ -139,7 +139,8 @@ func (db *DatabaseCollectionWithUser) importDoc(ctx context.Context, docid strin existingDoc.Expiry = *expiry } - docOut, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, true, existingDoc.Expiry, mutationOptions, existingDoc, func(doc *Document) (resultDocument *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + docUpdateEvent := Import + docOut, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, true, existingDoc.Expiry, mutationOptions, docUpdateEvent, existingDoc, func(doc *Document) (resultDocument *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { // Perform cas mismatch check first, as we want to identify cas mismatch before triggering migrate handling. // If there's a cas mismatch, the doc has been updated since the version that triggered the import. Handling depends on import mode. 
if doc.Cas != existingDoc.Cas { diff --git a/db/query_test.go b/db/query_test.go index 2ef43c0f82..81d262c96f 100644 --- a/db/query_test.go +++ b/db/query_test.go @@ -372,7 +372,7 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { // Create 10 added documents for i := 1; i <= 10; i++ { id := "created" + strconv.Itoa(i) - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "1-a", revId) docIdFlagMap[doc.ID] = uint8(0x0) @@ -385,12 +385,12 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { // Create 10 deleted documents for i := 1; i <= 10; i++ { id := "deleted" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "1-a", revId) body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "2-a", revId, "Couldn't create tombstone revision") @@ -402,22 +402,22 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) 
body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-a", revId, "Couldn't create tombstone revision") @@ -429,27 +429,27 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched|deleted" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, 
false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) body[BodyDeleted] = true - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-a", revId, "Couldn't create tombstone revision") body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-b", "2-b"}, false) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-b", revId, "Couldn't create tombstone revision") @@ -461,17 +461,17 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched|conflict" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, 
[]string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) diff --git a/db/revision_cache_bypass.go b/db/revision_cache_bypass.go index 049faeb937..1b05788870 100644 --- a/db/revision_cache_bypass.go +++ b/db/revision_cache_bypass.go @@ -30,8 +30,8 @@ func NewBypassRevisionCache(backingStore RevisionCacheBackingStore, bypassStat * } } -// Get fetches the revision for the given docID and revID immediately from the bucket. -func (rc *BypassRevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error) { +// GetWithRev fetches the revision for the given docID and revID immediately from the bucket. +func (rc *BypassRevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { unmarshalLevel := DocUnmarshalSync if includeBody { @@ -45,7 +45,33 @@ func (rc *BypassRevisionCache) Get(ctx context.Context, docID, revID string, inc docRev = DocumentRevision{ RevID: revID, } - docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, revID) + docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.CV, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, revID) + if err != nil { + return DocumentRevision{}, err + } + + rc.bypassStat.Add(1) + + return docRev, nil +} + +// GetWithCV fetches the Current Version for the given docID and CV immediately from the bucket. 
+func (rc *BypassRevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { + + unmarshalLevel := DocUnmarshalSync + if includeBody { + unmarshalLevel = DocUnmarshalAll + } + docRev = DocumentRevision{ + CV: cv, + } + + doc, err := rc.backingStore.GetDocument(ctx, docID, unmarshalLevel) + if err != nil { + return DocumentRevision{}, err + } + + docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.RevID, err = revCacheLoaderForDocumentCV(ctx, rc.backingStore, doc, *cv) if err != nil { return DocumentRevision{}, err } @@ -71,7 +97,7 @@ func (rc *BypassRevisionCache) GetActive(ctx context.Context, docID string, incl RevID: doc.CurrentRev, } - docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, doc.SyncData.CurrentRev) + docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.CV, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, doc.SyncData.CurrentRev) if err != nil { return DocumentRevision{}, err } @@ -96,7 +122,11 @@ func (rc *BypassRevisionCache) Upsert(ctx context.Context, docRev DocumentRevisi // no-op } -func (rc *BypassRevisionCache) Remove(docID, revID string) { +func (rc *BypassRevisionCache) RemoveWithRev(docID, revID string) { + // nop +} + +func (rc *BypassRevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { // nop } diff --git a/db/revision_cache_interface.go b/db/revision_cache_interface.go index cd8ba32b39..e50ba72f98 100644 --- a/db/revision_cache_interface.go +++ b/db/revision_cache_interface.go @@ -28,10 +28,15 @@ const ( // RevisionCache is an interface that can be used to fetch a 
DocumentRevision for a Doc ID and Rev ID pair. type RevisionCache interface { - // Get returns the given revision, and stores if not already cached. + // GetWithRev returns the given revision, and stores if not already cached. // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. // When includeDelta=true, the returned DocumentRevision will include delta - requires additional locking during retrieval. - Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error) + GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (DocumentRevision, error) + + // GetWithCV returns the given revision by CV, and stores if not already cached. + // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. + // When includeDelta=true, the returned DocumentRevision will include delta - requires additional locking during retrieval. + GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (DocumentRevision, error) // GetActive returns the current revision for the given doc ID, and stores if not already cached. // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. @@ -46,8 +51,11 @@ type RevisionCache interface { // Update will remove existing value and re-create new one Upsert(ctx context.Context, docRev DocumentRevision) - // Remove eliminates a revision in the cache. - Remove(docID, revID string) + // RemoveWithRev evicts a revision from the cache using its revID. + RemoveWithRev(docID, revID string) + + // RemoveWithCV evicts a revision from the cache using its current version. 
+ RemoveWithCV(docID string, cv *CurrentVersionVector) // UpdateDelta stores the given toDelta value in the given rev if cached UpdateDelta(ctx context.Context, docID, revID string, toDelta RevisionDelta) @@ -104,6 +112,7 @@ func DefaultRevisionCacheOptions() *RevisionCacheOptions { type RevisionCacheBackingStore interface { GetDocument(ctx context.Context, docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error) getRevision(ctx context.Context, doc *Document, revid string) ([]byte, Body, AttachmentsMeta, error) + getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) } // DocumentRevision stored and returned by the rev cache @@ -119,6 +128,7 @@ type DocumentRevision struct { Delta *RevisionDelta Deleted bool Removed bool // True if the revision is a removal. + CV *CurrentVersionVector _shallowCopyBody Body // an unmarshalled body that can produce shallow copies } @@ -223,6 +233,12 @@ type IDAndRev struct { RevID string } +type IDandCV struct { + DocID string + Version uint64 + Source string +} + // RevisionDelta stores data about a delta between a revision and ToRevID. type RevisionDelta struct { ToRevID string // Target revID for the delta @@ -246,44 +262,104 @@ func newRevCacheDelta(deltaBytes []byte, fromRevID string, toRevision DocumentRe // This is the RevisionCacheLoaderFunc callback for the context's RevisionCache. // Its job is to load a revision from the bucket when there's a cache miss. 
-func revCacheLoader(ctx context.Context, backingStore RevisionCacheBackingStore, id IDAndRev, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, err error) { +func revCacheLoader(ctx context.Context, backingStore RevisionCacheBackingStore, id IDAndRev, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, fetchedCV *CurrentVersionVector, err error) { var doc *Document unmarshalLevel := DocUnmarshalSync if unmarshalBody { unmarshalLevel = DocUnmarshalAll } if doc, err = backingStore.GetDocument(ctx, id.DocID, unmarshalLevel); doc == nil { - return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, err + return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, fetchedCV, err } return revCacheLoaderForDocument(ctx, backingStore, doc, id.RevID) } +// revCacheLoaderForCv will load a document from the bucket using the CV, compare the fetched doc and the CV specified in the function, +// and will still return revid for purpose of populating the Rev ID lookup map on the cache +func revCacheLoaderForCv(ctx context.Context, backingStore RevisionCacheBackingStore, id IDandCV, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, revid string, err error) { + cv := CurrentVersionVector{ + VersionCAS: id.Version, + SourceID: id.Source, + } + var doc *Document + unmarshalLevel := DocUnmarshalSync + if unmarshalBody { + unmarshalLevel = DocUnmarshalAll + } + if doc, err = backingStore.GetDocument(ctx, id.DocID, unmarshalLevel); doc == nil { + return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, revid, err + } + + return revCacheLoaderForDocumentCV(ctx, backingStore, doc, cv) 
+} + // Common revCacheLoader functionality used either during a cache miss (from revCacheLoader), or directly when retrieving current rev from cache -func revCacheLoaderForDocument(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, revid string) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, err error) { +func revCacheLoaderForDocument(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, revid string) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, fetchedCV *CurrentVersionVector, err error) { if bodyBytes, body, attachments, err = backingStore.getRevision(ctx, doc, revid); err != nil { // If we can't find the revision (either as active or conflicted body from the document, or as old revision body backup), check whether // the revision was a channel removal. 
If so, we want to store as removal in the revision cache removalBodyBytes, removalHistory, activeChannels, isRemoval, isDelete, isRemovalErr := doc.IsChannelRemoval(ctx, revid) if isRemovalErr != nil { - return bodyBytes, body, history, channels, isRemoval, nil, isDelete, nil, isRemovalErr + return bodyBytes, body, history, channels, isRemoval, nil, isDelete, nil, fetchedCV, isRemovalErr } if isRemoval { - return removalBodyBytes, body, removalHistory, activeChannels, isRemoval, nil, isDelete, nil, nil + return removalBodyBytes, body, removalHistory, activeChannels, isRemoval, nil, isDelete, nil, fetchedCV, nil } else { // If this wasn't a removal, return the original error from getRevision - return bodyBytes, body, history, channels, removed, nil, isDelete, nil, err + return bodyBytes, body, history, channels, removed, nil, isDelete, nil, fetchedCV, err } } deleted = doc.History[revid].Deleted validatedHistory, getHistoryErr := doc.History.getHistory(revid) if getHistoryErr != nil { - return bodyBytes, body, history, channels, removed, nil, deleted, nil, getHistoryErr + return bodyBytes, body, history, channels, removed, nil, deleted, nil, fetchedCV, getHistoryErr } history = encodeRevisions(ctx, doc.ID, validatedHistory) channels = doc.History[revid].Channels + if doc.HLV != nil { + fetchedCV = &CurrentVersionVector{SourceID: doc.HLV.SourceID, VersionCAS: doc.HLV.Version} + } + + return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, fetchedCV, err +} + +// revCacheLoaderForDocumentCV used either during cache miss (from revCacheLoaderForCv), or used directly when getting current active CV from cache +func revCacheLoaderForDocumentCV(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, cv CurrentVersionVector) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, revid string, err error) { + if bodyBytes, body, 
attachments, err = backingStore.getCurrentVersion(ctx, doc); err != nil { + // we need implementation of IsChannelRemoval for CV here. + // pending CBG-3213 support of channel removal for CV + } - return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, err + if err = doc.HasCurrentVersion(cv); err != nil { + return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, revid, err + } + channels = doc.currentRevChannels + revid = doc.CurrentRev + + return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, revid, err +} + +func (c *DatabaseCollection) getCurrentVersion(ctx context.Context, doc *Document) (bodyBytes []byte, body Body, attachments AttachmentsMeta, err error) { + bodyBytes, err = doc.BodyBytes(ctx) + if err != nil { + base.WarnfCtx(ctx, "Marshal error when retrieving active current version body: %v", err) + return nil, nil, nil, err + } + + body = doc._body + attachments = doc.Attachments + + // handle backup revision inline attachments, or pre-2.5 meta + if inlineAtts, cleanBodyBytes, cleanBody, err := extractInlineAttachments(bodyBytes); err != nil { + return nil, nil, nil, err + } else if len(inlineAtts) > 0 { + // we found some inline attachments, so merge them with attachments, and update the bodies + attachments = mergeAttachments(inlineAtts, attachments) + bodyBytes = cleanBodyBytes + body = cleanBody + } + return bodyBytes, body, attachments, err } diff --git a/db/revision_cache_lru.go b/db/revision_cache_lru.go index 575c7c6811..32d78d7613 100644 --- a/db/revision_cache_lru.go +++ b/db/revision_cache_lru.go @@ -45,8 +45,12 @@ func (sc *ShardedLRURevisionCache) getShard(docID string) *LRURevisionCache { return sc.caches[sgbucket.VBHash(docID, sc.numShards)] } -func (sc *ShardedLRURevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error) { - return sc.getShard(docID).Get(ctx, docID, revID, 
includeBody, includeDelta) +func (sc *ShardedLRURevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { + return sc.getShard(docID).GetWithRev(ctx, docID, revID, includeBody, includeDelta) +} + +func (sc *ShardedLRURevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { + return sc.getShard(docID).GetWithCV(ctx, docID, cv, includeBody, includeDelta) } func (sc *ShardedLRURevisionCache) Peek(ctx context.Context, docID, revID string) (docRev DocumentRevision, found bool) { @@ -69,14 +73,19 @@ func (sc *ShardedLRURevisionCache) Upsert(ctx context.Context, docRev DocumentRe sc.getShard(docRev.DocID).Upsert(ctx, docRev) } -func (sc *ShardedLRURevisionCache) Remove(docID, revID string) { - sc.getShard(docID).Remove(docID, revID) +func (sc *ShardedLRURevisionCache) RemoveWithRev(docID, revID string) { + sc.getShard(docID).RemoveWithRev(docID, revID) +} + +func (sc *ShardedLRURevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { + sc.getShard(docID).RemoveWithCV(docID, cv) } // An LRU cache of document revision bodies, together with their channel access. 
type LRURevisionCache struct { backingStore RevisionCacheBackingStore cache map[IDAndRev]*list.Element + hlvCache map[IDandCV]*list.Element lruList *list.List cacheHits *base.SgwIntStat cacheMisses *base.SgwIntStat @@ -93,7 +102,9 @@ type revCacheValue struct { attachments AttachmentsMeta delta *RevisionDelta body Body - key IDAndRev + id string + cv CurrentVersionVector + revID string bodyBytes []byte lock sync.RWMutex deleted bool @@ -105,6 +116,7 @@ func NewLRURevisionCache(capacity uint32, backingStore RevisionCacheBackingStore return &LRURevisionCache{ cache: map[IDAndRev]*list.Element{}, + hlvCache: map[IDandCV]*list.Element{}, lruList: list.New(), capacity: capacity, backingStore: backingStore, @@ -117,14 +129,18 @@ func NewLRURevisionCache(capacity uint32, backingStore RevisionCacheBackingStore // Returns the body of the revision, its history, and the set of channels it's in. // If the cache has a loaderFunction, it will be called if the revision isn't in the cache; // any error returned by the loaderFunction will be returned from Get. -func (rc *LRURevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error) { - return rc.getFromCache(ctx, docID, revID, true, includeBody, includeDelta) +func (rc *LRURevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (DocumentRevision, error) { + return rc.getFromCacheByRev(ctx, docID, revID, true, includeBody, includeDelta) +} + +func (rc *LRURevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (DocumentRevision, error) { + return rc.getFromCacheByCV(ctx, docID, cv, true, includeBody, includeDelta) } // Looks up a revision from the cache only. Will not fall back to loader function if not // present in the cache. 
func (rc *LRURevisionCache) Peek(ctx context.Context, docID, revID string) (docRev DocumentRevision, found bool) { - docRev, err := rc.getFromCache(ctx, docID, revID, false, RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := rc.getFromCacheByRev(ctx, docID, revID, false, RevCacheOmitBody, RevCacheOmitDelta) if err != nil { return DocumentRevision{}, false } @@ -140,18 +156,42 @@ func (rc *LRURevisionCache) UpdateDelta(ctx context.Context, docID, revID string } } -func (rc *LRURevisionCache) getFromCache(ctx context.Context, docID, revID string, loadOnCacheMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { +func (rc *LRURevisionCache) getFromCacheByRev(ctx context.Context, docID, revID string, loadOnCacheMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { value := rc.getValue(docID, revID, loadOnCacheMiss) if value == nil { return DocumentRevision{}, nil } - docRev, statEvent, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) - rc.statsRecorderFunc(statEvent) + docRev, cacheHit, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) + rc.statsRecorderFunc(cacheHit) + + if err != nil { + rc.removeValue(value) // don't keep failed loads in the cache + } + if !cacheHit { + rc.addToHLVMapPostLoad(docID, docRev.RevID, docRev.CV) + } + + return docRev, err +} + +func (rc *LRURevisionCache) getFromCacheByCV(ctx context.Context, docID string, cv *CurrentVersionVector, loadCacheOnMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { + value := rc.getValueByCV(docID, cv, loadCacheOnMiss) + if value == nil { + return DocumentRevision{}, nil + } + + docRev, cacheHit, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) + rc.statsRecorderFunc(cacheHit) if err != nil { rc.removeValue(value) // don't keep failed loads in the cache } + + if !cacheHit { + rc.addToRevMapPostLoad(docID, docRev.RevID, docRev.CV) + } + return docRev, err } @@ -162,15 +202,16 @@ func (rc 
*LRURevisionCache) LoadInvalidRevFromBackingStore(ctx context.Context, var docRevBody Body value := revCacheValue{ - key: key, + id: key.DocID, + revID: key.RevID, } // If doc has been passed in use this to grab values. Otherwise run revCacheLoader which will grab the Document // first if doc != nil { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, key.RevID) + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, _, value.err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, key.RevID) } else { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoader(ctx, rc.backingStore, key, includeBody) + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, _, value.err = revCacheLoader(ctx, rc.backingStore, key, includeBody) } if includeDelta { @@ -210,12 +251,15 @@ func (rc *LRURevisionCache) GetActive(ctx context.Context, docID string, include // Retrieve from or add to rev cache value := rc.getValue(docID, bucketDoc.CurrentRev, true) - docRev, statEvent, err := value.loadForDoc(ctx, rc.backingStore, bucketDoc, includeBody) - rc.statsRecorderFunc(statEvent) + docRev, cacheHit, err := value.loadForDoc(ctx, rc.backingStore, bucketDoc, includeBody) + rc.statsRecorderFunc(cacheHit) if err != nil { rc.removeValue(value) // don't keep failed loads in the cache } + // add successfully fetched value to cv lookup map too + rc.addToHLVMapPostLoad(docID, docRev.RevID, docRev.CV) + return docRev, err } @@ -234,30 +278,43 @@ func (rc *LRURevisionCache) Put(ctx context.Context, docRev DocumentRevision) { // TODO: CBG-1948 panic("Missing history for RevisionCache.Put") } - value := rc.getValue(docRev.DocID, 
docRev.RevID, true) + // doc should always have a cv present in a PUT operation on the cache (update HLV is called before hand in doc update process) + // thus we can call getValueByCV directly the update the rev lookup post this + value := rc.getValueByCV(docRev.DocID, docRev.CV, true) + // store the created value value.store(docRev) + + // add new doc version to the rev id lookup map + rc.addToRevMapPostLoad(docRev.DocID, docRev.RevID, docRev.CV) } // Upsert a revision in the cache. func (rc *LRURevisionCache) Upsert(ctx context.Context, docRev DocumentRevision) { - key := IDAndRev{DocID: docRev.DocID, RevID: docRev.RevID} + var value *revCacheValue + // similar to PUT operation we should have the CV defined by this point (updateHLV is called before calling this) + key := IDandCV{DocID: docRev.DocID, Source: docRev.CV.SourceID, Version: docRev.CV.VersionCAS} + legacyKey := IDAndRev{DocID: docRev.DocID, RevID: docRev.RevID} rc.lock.Lock() - // If element exists remove from lrulist - if elem := rc.cache[key]; elem != nil { + // lookup for element in hlv lookup map, if not found for some reason try rev lookup map + if elem := rc.hlvCache[key]; elem != nil { + rc.lruList.Remove(elem) + } else if elem = rc.cache[legacyKey]; elem != nil { rc.lruList.Remove(elem) } // Add new value and overwrite existing cache key, pushing to front to maintain order - value := &revCacheValue{key: key} - rc.cache[key] = rc.lruList.PushFront(value) + // also ensure we add to rev id lookup map too + value = &revCacheValue{id: docRev.DocID, cv: *docRev.CV} + elem := rc.lruList.PushFront(value) + rc.hlvCache[key] = elem + rc.cache[legacyKey] = elem - // Purge oldest item if required - for len(rc.cache) > int(rc.capacity) { + for rc.lruList.Len() > int(rc.capacity) { rc.purgeOldest_() } rc.lock.Unlock() - + // store upsert value value.store(docRev) } @@ -272,9 +329,32 @@ func (rc *LRURevisionCache) getValue(docID, revID string, create bool) (value *r rc.lruList.MoveToFront(elem) value = 
elem.Value.(*revCacheValue) } else if create { - value = &revCacheValue{key: key} + value = &revCacheValue{id: docID, revID: revID} rc.cache[key] = rc.lruList.PushFront(value) - for len(rc.cache) > int(rc.capacity) { + for rc.lruList.Len() > int(rc.capacity) { + rc.purgeOldest_() + } + } + rc.lock.Unlock() + return +} + +// getValueByCV gets a value from rev cache by CV, if not found and create is true, will add the value to cache and both lookup maps +func (rc *LRURevisionCache) getValueByCV(docID string, cv *CurrentVersionVector, create bool) (value *revCacheValue) { + if docID == "" || cv == nil { + return nil + } + + key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} + rc.lock.Lock() + if elem := rc.hlvCache[key]; elem != nil { + rc.lruList.MoveToFront(elem) + value = elem.Value.(*revCacheValue) + } else if create { + value = &revCacheValue{id: docID, cv: *cv} + newElem := rc.lruList.PushFront(value) + rc.hlvCache[key] = newElem + for rc.lruList.Len() > int(rc.capacity) { rc.purgeOldest_() } } @@ -282,8 +362,93 @@ func (rc *LRURevisionCache) getValue(docID, revID string, create bool) (value *r return } +// addToRevMapPostLoad will generate an entry in the Rev lookup map for a new document entering the cache +func (rc *LRURevisionCache) addToRevMapPostLoad(docID, revID string, cv *CurrentVersionVector) { + legacyKey := IDAndRev{DocID: docID, RevID: revID} + key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} + + rc.lock.Lock() + defer rc.lock.Unlock() + // check for existing value in rev cache map (due to concurrent fetch by rev ID) + cvElem, cvFound := rc.hlvCache[key] + revElem, revFound := rc.cache[legacyKey] + if !cvFound { + // it's possible the element has been evicted if we don't find the element above (high churn on rev cache) + // need to return doc revision to caller still but no need to repopulate the cache + return + } + // Check if another goroutine has already updated the rev map + if revFound { + if cvElem 
== revElem { + // already match, return + return + } + // if CV map and rev map are targeting different list elements, update to have both use the cv map element + rc.cache[legacyKey] = cvElem + rc.lruList.Remove(revElem) + } else { + // if not found we need to add the element to the rev lookup (for PUT code path) + rc.cache[legacyKey] = cvElem + } +} + +// addToHLVMapPostLoad will generate an entry in the CV lookup map for a new document entering the cache +func (rc *LRURevisionCache) addToHLVMapPostLoad(docID, revID string, cv *CurrentVersionVector) { + legacyKey := IDAndRev{DocID: docID, RevID: revID} + key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} + + rc.lock.Lock() + defer rc.lock.Unlock() + // check for existing value in rev cache map (due to concurrent fetch by rev ID) + cvElem, cvFound := rc.hlvCache[key] + revElem, revFound := rc.cache[legacyKey] + if !revFound { + // it's possible the element has been evicted if we don't find the element above (high churn on rev cache) + // need to return doc revision to caller still but no need to repopulate the cache + return + } + // Check if another goroutine has already updated the cv map + if cvFound { + if cvElem == revElem { + // already match, return + return + } + // if CV map and rev map are targeting different list elements, update to have both use the cv map element + rc.cache[legacyKey] = cvElem + rc.lruList.Remove(revElem) + } +} + // Remove removes a value from the revision cache, if present. 
-func (rc *LRURevisionCache) Remove(docID, revID string) { +func (rc *LRURevisionCache) RemoveWithRev(docID, revID string) { + rc.removeFromCacheByRev(docID, revID) +} + +// RemoveWithCV removes a value from rev cache by CV reference if present +func (rc *LRURevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { + rc.removeFromCacheByCV(docID, cv) +} + +// removeFromCacheByCV removes an entry from rev cache by CV +func (rc *LRURevisionCache) removeFromCacheByCV(docID string, cv *CurrentVersionVector) { + key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} + rc.lock.Lock() + defer rc.lock.Unlock() + element, ok := rc.hlvCache[key] + if !ok { + return + } + // grab the revid key from the value to enable us to remove the reference from the rev lookup map too + elem := element.Value.(*revCacheValue) + legacyKey := IDAndRev{DocID: docID, RevID: elem.revID} + rc.lruList.Remove(element) + delete(rc.hlvCache, key) + // remove from rev lookup map too + delete(rc.cache, legacyKey) +} + +// removeFromCacheByRev removes an entry from rev cache by revID +func (rc *LRURevisionCache) removeFromCacheByRev(docID, revID string) { key := IDAndRev{DocID: docID, RevID: revID} rc.lock.Lock() defer rc.lock.Unlock() @@ -291,23 +456,38 @@ func (rc *LRURevisionCache) Remove(docID, revID string) { if !ok { return } + // grab the cv key from the value to enable us to remove the reference from the rev lookup map too + elem := element.Value.(*revCacheValue) + hlvKey := IDandCV{DocID: docID, Source: elem.cv.SourceID, Version: elem.cv.VersionCAS} rc.lruList.Remove(element) delete(rc.cache, key) + // remove from CV lookup map too + delete(rc.hlvCache, hlvKey) } // removeValue removes a value from the revision cache, if present and the value matches the the value. If there's an item in the revision cache with a matching docID and revID but the document is different, this item will not be removed from the rev cache. 
func (rc *LRURevisionCache) removeValue(value *revCacheValue) { rc.lock.Lock() - if element := rc.cache[value.key]; element != nil && element.Value == value { + defer rc.lock.Unlock() + revKey := IDAndRev{DocID: value.id, RevID: value.revID} + if element := rc.cache[revKey]; element != nil && element.Value == value { rc.lruList.Remove(element) - delete(rc.cache, value.key) + delete(rc.cache, revKey) + } + // need to also check hlv lookup cache map + hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} + if element := rc.hlvCache[hlvKey]; element != nil && element.Value == value { + rc.lruList.Remove(element) + delete(rc.hlvCache, hlvKey) } - rc.lock.Unlock() } func (rc *LRURevisionCache) purgeOldest_() { value := rc.lruList.Remove(rc.lruList.Back()).(*revCacheValue) - delete(rc.cache, value.key) + revKey := IDAndRev{DocID: value.id, RevID: value.revID} + hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} + delete(rc.cache, revKey) + delete(rc.hlvCache, hlvKey) } // Gets the body etc. out of a revCacheValue. If they aren't present already, the loader func @@ -319,6 +499,8 @@ func (value *revCacheValue) load(ctx context.Context, backingStore RevisionCache // to reduce locking when includeDelta=false var delta *RevisionDelta var docRevBody Body + var fetchedCV *CurrentVersionVector + var revid string // Attempt to read cached value. 
value.lock.RLock() @@ -349,12 +531,24 @@ func (value *revCacheValue) load(ctx context.Context, backingStore RevisionCache // If body is requested and not already present in cache, populate value.body from value.BodyBytes if includeBody && value.body == nil && value.err == nil { if err := value.body.Unmarshal(value.bodyBytes); err != nil { - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.key.DocID), value.key.RevID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.id), value.revID) } } } else { cacheHit = false - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoader(ctx, backingStore, value.key, includeBody) + if value.revID == "" { + hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, revid, value.err = revCacheLoaderForCv(ctx, backingStore, hlvKey, includeBody) + // based off the current value load we need to populate the revid key with what has been fetched from the bucket (for use of populating the opposite lookup map) + value.revID = revid + } else { + revKey := IDAndRev{DocID: value.id, RevID: value.revID} + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, fetchedCV, value.err = revCacheLoader(ctx, backingStore, revKey, includeBody) + // based off the revision load we need to populate the hlv key with what has been fetched from the bucket (for use of populating the opposite lookup map) + if fetchedCV != nil { + value.cv = *fetchedCV + } + } } if includeDelta { @@ -374,7 +568,7 @@ func (value *revCacheValue) updateBody(ctx context.Context) (err error) { var body Body if err := body.Unmarshal(value.bodyBytes); err != nil { // On unmarshal error, warn return 
docRev without body - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.key.DocID), value.key.RevID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.id), value.revID) return err } @@ -391,8 +585,8 @@ func (value *revCacheValue) updateBody(ctx context.Context) (err error) { func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) (DocumentRevision, error) { docRev := DocumentRevision{ - DocID: value.key.DocID, - RevID: value.key.RevID, + DocID: value.id, + RevID: value.revID, BodyBytes: value.bodyBytes, History: value.history, Channels: value.channels, @@ -400,6 +594,7 @@ func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) Attachments: value.attachments.ShallowCopy(), // Avoid caller mutating the stored attachments Deleted: value.deleted, Removed: value.removed, + CV: &CurrentVersionVector{VersionCAS: value.cv.VersionCAS, SourceID: value.cv.SourceID}, } if body != nil { docRev._shallowCopyBody = body.ShallowCopy() @@ -414,6 +609,8 @@ func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, includeBody bool) (docRev DocumentRevision, cacheHit bool, err error) { var docRevBody Body + var fetchedCV *CurrentVersionVector + var revid string value.lock.RLock() if value.bodyBytes != nil || value.err != nil { if includeBody { @@ -443,13 +640,22 @@ func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore Revisio // If body is requested and not already present in cache, attempt to generate from bytes and insert into cache if includeBody && value.body == nil { if err := value.body.Unmarshal(value.bodyBytes); err != nil { - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.key.DocID), value.key.RevID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for 
%s %s", base.UD(value.id), value.revID) } } } else { cacheHit = false - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoaderForDocument(ctx, backingStore, doc, value.key.RevID) + if value.revID == "" { + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, revid, value.err = revCacheLoaderForDocumentCV(ctx, backingStore, doc, value.cv) + value.revID = revid + } else { + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, fetchedCV, value.err = revCacheLoaderForDocument(ctx, backingStore, doc, value.revID) + if fetchedCV != nil { + value.cv = *fetchedCV + } + } } + if includeBody { docRevBody = value.body } @@ -462,7 +668,7 @@ func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore Revisio func (value *revCacheValue) store(docRev DocumentRevision) { value.lock.Lock() if value.bodyBytes == nil { - // value already has doc id/rev id in key + value.revID = docRev.RevID value.bodyBytes = docRev.BodyBytes value.history = docRev.History value.channels = docRev.Channels diff --git a/db/revision_cache_test.go b/db/revision_cache_test.go index 1451d353d9..d5abbe6b97 100644 --- a/db/revision_cache_test.go +++ b/db/revision_cache_test.go @@ -50,6 +50,13 @@ func (t *testBackingStore) GetDocument(ctx context.Context, docid string, unmars Channels: base.SetOf("*"), }, } + doc.currentRevChannels = base.SetOf("*") + + doc.HLV = &HybridLogicalVector{ + SourceID: "test", + Version: 123, + } + return doc, nil } @@ -66,6 +73,19 @@ func (t *testBackingStore) getRevision(ctx context.Context, doc *Document, revid return bodyBytes, b, nil, err } +func (t *testBackingStore) getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) { + t.getRevisionCounter.Add(1) + + b := Body{ + "testing": true, + BodyId: 
doc.ID, + BodyRev: doc.CurrentRev, + "current_version": &CurrentVersionVector{VersionCAS: doc.HLV.Version, SourceID: doc.HLV.SourceID}, + } + bodyBytes, err := base.JSONMarshal(b) + return bodyBytes, b, nil, err +} + type noopBackingStore struct{} func (*noopBackingStore) GetDocument(ctx context.Context, docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error) { @@ -76,6 +96,10 @@ func (*noopBackingStore) getRevision(ctx context.Context, doc *Document, revid s return nil, nil, nil, nil } +func (*noopBackingStore) getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) { + return nil, nil, nil, nil +} + // Tests the eviction from the LRURevisionCache func TestLRURevisionCacheEviction(t *testing.T) { cacheHitCounter, cacheMissCounter := base.SgwIntStat{}, base.SgwIntStat{} @@ -86,13 +110,13 @@ func TestLRURevisionCacheEviction(t *testing.T) { // Fill up the rev cache with the first 10 docs for docID := 0; docID < 10; docID++ { id := strconv.Itoa(docID) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", History: Revisions{"start": 1}}) + cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) } // Get them back out for i := 0; i < 10; i++ { docID := strconv.Itoa(i) - docRev, err := cache.Get(ctx, docID, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := cache.GetWithRev(ctx, docID, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.NotNil(t, docRev.BodyBytes, "nil body for %s", docID) assert.Equal(t, docID, docRev.DocID) @@ -103,7 +127,7 @@ func TestLRURevisionCacheEviction(t *testing.T) { // Add 3 more docs to the now full revcache for i := 10; i < 13; i++ { docID := strconv.Itoa(i) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: docID, RevID: "1-abc", History: Revisions{"start": 1}}) + 
cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: docID, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(i), SourceID: "test"}, History: Revisions{"start": 1}}) } // Check that the first 3 docs were evicted @@ -120,7 +144,68 @@ func TestLRURevisionCacheEviction(t *testing.T) { // and check we can Get up to and including the last 3 we put in for i := 0; i < 10; i++ { id := strconv.Itoa(i + 3) - docRev, err := cache.Get(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := cache.GetWithRev(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + assert.NoError(t, err) + assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) + assert.Equal(t, id, docRev.DocID) + assert.Equal(t, int64(0), cacheMissCounter.Value()) + assert.Equal(t, prevCacheHitCount+int64(i)+1, cacheHitCounter.Value()) + } +} + +// TestLRURevisionCacheEvictionMixedRevAndCV: +// - Add 10 docs to the cache +// - Assert that the cache list and relevant lookup maps have correct lengths +// - Add 3 more docs +// - Assert that lookup maps and the cache list still only have 10 elements in +// - Perform a Get with CV specified on all 10 elements in the cache and assert we get a hit for each element and no misses, +// testing the eviction worked correct +// - Then do the same but for rev lookup +func TestLRURevisionCacheEvictionMixedRevAndCV(t *testing.T) { + cacheHitCounter, cacheMissCounter := base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &noopBackingStore{}, &cacheHitCounter, &cacheMissCounter) + + ctx := base.TestCtx(t) + + // Fill up the rev cache with the first 10 docs + for docID := 0; docID < 10; docID++ { + id := strconv.Itoa(docID) + cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) + } + + // assert that the list has 10 elements along with both lookup maps + assert.Equal(t, 10, 
len(cache.hlvCache)) + assert.Equal(t, 10, len(cache.cache)) + assert.Equal(t, 10, cache.lruList.Len()) + + // Add 3 more docs to the now full rev cache to trigger eviction + for docID := 10; docID < 13; docID++ { + id := strconv.Itoa(docID) + cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) + } + // assert the cache and associated lookup maps only have 10 items in them (i.e.e is eviction working?) + assert.Equal(t, 10, len(cache.hlvCache)) + assert.Equal(t, 10, len(cache.cache)) + assert.Equal(t, 10, cache.lruList.Len()) + + // assert we can get a hit on all 10 elements in the cache by CV lookup + prevCacheHitCount := cacheHitCounter.Value() + for i := 0; i < 10; i++ { + id := strconv.Itoa(i + 3) + cv := CurrentVersionVector{VersionCAS: uint64(i + 3), SourceID: "test"} + docRev, err := cache.GetWithCV(ctx, id, &cv, RevCacheOmitBody, RevCacheOmitDelta) + assert.NoError(t, err) + assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) + assert.Equal(t, id, docRev.DocID) + assert.Equal(t, int64(0), cacheMissCounter.Value()) + assert.Equal(t, prevCacheHitCount+int64(i)+1, cacheHitCounter.Value()) + } + + // now do same but for rev lookup + prevCacheHitCount = cacheHitCounter.Value() + for i := 0; i < 10; i++ { + id := strconv.Itoa(i + 3) + docRev, err := cache.GetWithRev(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) assert.Equal(t, id, docRev.DocID) @@ -135,7 +220,7 @@ func TestBackingStore(t *testing.T) { cache := NewLRURevisionCache(10, &testBackingStore{[]string{"Peter"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) // Get Rev for the first time - miss cache, but fetch the doc and revision to store - docRev, err := cache.Get(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + 
docRev, err := cache.GetWithRev(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.Equal(t, "Jens", docRev.DocID) assert.NotNil(t, docRev.History) @@ -146,7 +231,7 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Doc doesn't exist, so miss the cache, and fail when getting the doc - docRev, err = cache.Get(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.GetWithRev(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) assert.Nil(t, docRev.BodyBytes) assert.Equal(t, int64(0), cacheHitCounter.Value()) @@ -155,7 +240,7 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Rev is already resident, but still issue GetDocument to check for later revisions - docRev, err = cache.Get(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.GetWithRev(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.Equal(t, "Jens", docRev.DocID) assert.NotNil(t, docRev.History) @@ -166,7 +251,60 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Rev still doesn't exist, make sure it wasn't cached - docRev, err = cache.Get(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.GetWithRev(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + assertHTTPError(t, err, 404) + assert.Nil(t, docRev.BodyBytes) + assert.Equal(t, int64(1), cacheHitCounter.Value()) + assert.Equal(t, int64(3), cacheMissCounter.Value()) + assert.Equal(t, int64(3), getDocumentCounter.Value()) + assert.Equal(t, int64(1), getRevisionCounter.Value()) +} + +// TestBackingStoreCV: +// - Perform a Get on a doc by cv that is not currently in the rev cache, assert we get cache miss +// - Perform a Get again on 
the same doc and assert we get cache hit +// - Perform a Get on doc that doesn't exist, so misses cache and will fail on retrieving doc from bucket +// - Try a Get again on the same doc and assert it wasn't loaded into the cache as it doesn't exist +func TestBackingStoreCV(t *testing.T) { + cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &testBackingStore{[]string{"not_found"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) + + // Get Rev for the first time - miss cache, but fetch the doc and revision to store + cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} + docRev, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) + assert.NoError(t, err) + assert.Equal(t, "doc1", docRev.DocID) + assert.NotNil(t, docRev.Channels) + assert.Equal(t, "test", docRev.CV.SourceID) + assert.Equal(t, uint64(123), docRev.CV.VersionCAS) + assert.Equal(t, int64(0), cacheHitCounter.Value()) + assert.Equal(t, int64(1), cacheMissCounter.Value()) + assert.Equal(t, int64(1), getDocumentCounter.Value()) + assert.Equal(t, int64(1), getRevisionCounter.Value()) + + // Perform a get on the same doc as above, check that we get cache hit + docRev, err = cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) + assert.NoError(t, err) + assert.Equal(t, "doc1", docRev.DocID) + assert.Equal(t, "test", docRev.CV.SourceID) + assert.Equal(t, uint64(123), docRev.CV.VersionCAS) + assert.Equal(t, int64(1), cacheHitCounter.Value()) + assert.Equal(t, int64(1), cacheMissCounter.Value()) + assert.Equal(t, int64(1), getDocumentCounter.Value()) + assert.Equal(t, int64(1), getRevisionCounter.Value()) + + // Doc doesn't exist, so miss the cache, and fail when getting the doc + cv = CurrentVersionVector{SourceID: "test11", VersionCAS: 100} + docRev, err = 
cache.GetWithCV(base.TestCtx(t), "not_found", &cv, RevCacheOmitBody, RevCacheOmitDelta) + assertHTTPError(t, err, 404) + assert.Nil(t, docRev.BodyBytes) + assert.Equal(t, int64(1), cacheHitCounter.Value()) + assert.Equal(t, int64(2), cacheMissCounter.Value()) + assert.Equal(t, int64(2), getDocumentCounter.Value()) + assert.Equal(t, int64(1), getRevisionCounter.Value()) + + // Rev still doesn't exist, make sure it wasn't cached + docRev, err = cache.GetWithCV(base.TestCtx(t), "not_found", &cv, RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) assert.Nil(t, docRev.BodyBytes) assert.Equal(t, int64(1), cacheHitCounter.Value()) @@ -255,15 +393,15 @@ func TestBypassRevisionCache(t *testing.T) { assert.False(t, ok) // Get non-existing doc - _, err = rc.Get(base.TestCtx(t), "invalid", rev1, RevCacheOmitBody, RevCacheOmitDelta) + _, err = rc.GetWithRev(base.TestCtx(t), "invalid", rev1, RevCacheOmitBody, RevCacheOmitDelta) assert.True(t, base.IsDocNotFoundError(err)) // Get non-existing revision - _, err = rc.Get(base.TestCtx(t), key, "3-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, err = rc.GetWithRev(base.TestCtx(t), key, "3-abc", RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) // Get specific revision - doc, err := rc.Get(base.TestCtx(t), key, rev1, RevCacheOmitBody, RevCacheOmitDelta) + doc, err := rc.GetWithRev(base.TestCtx(t), key, rev1, RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) require.NotNil(t, doc) assert.Equal(t, `{"value":1234}`, string(doc.BodyBytes)) @@ -350,7 +488,7 @@ func TestPutExistingRevRevisionCacheAttachmentProperty(t *testing.T) { "value": 1235, BodyAttachments: map[string]interface{}{"myatt": map[string]interface{}{"content_type": "text/plain", "data": "SGVsbG8gV29ybGQh"}}, } - _, _, err = collection.PutExistingRevWithBody(ctx, docKey, rev2body, []string{rev2id, rev1id}, false) + _, _, err = collection.PutExistingRevWithBody(ctx, docKey, rev2body, []string{rev2id, rev1id}, false, 
ExistingVersionWithUpdateToHLV) assert.NoError(t, err, "Unexpected error calling collection.PutExistingRev") // Get the raw document directly from the bucket, validate _attachments property isn't found @@ -361,7 +499,7 @@ func TestPutExistingRevRevisionCacheAttachmentProperty(t *testing.T) { assert.False(t, ok, "_attachments property still present in document body retrieved from bucket: %#v", bucketBody) // Get the raw document directly from the revcache, validate _attachments property isn't found - docRevision, err := collection.revisionCache.Get(base.TestCtx(t), docKey, rev2id, RevCacheOmitBody, RevCacheOmitDelta) + docRevision, err := collection.revisionCache.GetWithRev(base.TestCtx(t), docKey, rev2id, RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err, "Unexpected error calling collection.revisionCache.Get") assert.NotContains(t, docRevision.BodyBytes, BodyAttachments, "_attachments property still present in document body retrieved from rev cache: %#v", bucketBody) _, ok = docRevision.Attachments["myatt"] @@ -388,12 +526,12 @@ func TestRevisionImmutableDelta(t *testing.T) { secondDelta := []byte("modified delta") // Trigger load into cache - _, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + _, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error adding to cache") cache.UpdateDelta(base.TestCtx(t), "doc1", "1-abc", RevisionDelta{ToRevID: "rev2", DeltaBytes: firstDelta}) // Retrieve from cache - retrievedRev, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + retrievedRev, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error retrieving from cache") assert.Equal(t, "rev2", retrievedRev.Delta.ToRevID) assert.Equal(t, firstDelta, retrievedRev.Delta.DeltaBytes) @@ -404,7 +542,7 @@ func TestRevisionImmutableDelta(t *testing.T) 
{ assert.Equal(t, firstDelta, retrievedRev.Delta.DeltaBytes) // Retrieve again, validate delta is correct - updatedRev, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + updatedRev, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error retrieving from cache") assert.Equal(t, "rev3", updatedRev.Delta.ToRevID) assert.Equal(t, secondDelta, updatedRev.Delta.DeltaBytes) @@ -419,8 +557,8 @@ func TestSingleLoad(t *testing.T) { cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} cache := NewLRURevisionCache(10, &testBackingStore{nil, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc123", RevID: "1-abc", History: Revisions{"start": 1}}) - _, err := cache.Get(base.TestCtx(t), "doc123", "1-abc", true, false) + cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc123", RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(123), SourceID: "test"}, History: Revisions{"start": 1}}) + _, err := cache.GetWithRev(base.TestCtx(t), "doc123", "1-abc", true, false) assert.NoError(t, err) } @@ -429,14 +567,14 @@ func TestConcurrentLoad(t *testing.T) { cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} cache := NewLRURevisionCache(10, &testBackingStore{nil, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc1", RevID: "1-abc", History: Revisions{"start": 1}}) + cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc1", RevID: "1-abc", CV: 
&CurrentVersionVector{VersionCAS: uint64(1234), SourceID: "test"}, History: Revisions{"start": 1}}) // Trigger load into cache var wg sync.WaitGroup wg.Add(20) for i := 0; i < 20; i++ { go func() { - _, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", true, false) + _, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", true, false) assert.NoError(t, err) wg.Done() }() @@ -454,14 +592,14 @@ func TestRevisionCacheRemove(t *testing.T) { rev1id, _, err := collection.Put(ctx, "doc", Body{"val": 123}) assert.NoError(t, err) - docRev, err := collection.revisionCache.Get(base.TestCtx(t), "doc", rev1id, true, true) + docRev, err := collection.revisionCache.GetWithRev(base.TestCtx(t), "doc", rev1id, true, true) assert.NoError(t, err) assert.Equal(t, rev1id, docRev.RevID) assert.Equal(t, int64(0), db.DbStats.Cache().RevisionCacheMisses.Value()) - collection.revisionCache.Remove("doc", rev1id) + collection.revisionCache.RemoveWithRev("doc", rev1id) - docRev, err = collection.revisionCache.Get(base.TestCtx(t), "doc", rev1id, true, true) + docRev, err = collection.revisionCache.GetWithRev(base.TestCtx(t), "doc", rev1id, true, true) assert.NoError(t, err) assert.Equal(t, rev1id, docRev.RevID) assert.Equal(t, int64(1), db.DbStats.Cache().RevisionCacheMisses.Value()) @@ -482,6 +620,59 @@ func TestRevisionCacheRemove(t *testing.T) { assert.Equal(t, int64(1), db.DbStats.Cache().RevisionCacheMisses.Value()) } +// TestRevCacheOperationsCV: +// - Create doc revision, put the revision into the cache +// - Perform a get on that doc by cv and assert that it has correctly been handled +// - Updated doc revision and upsert the cache +// - Get the updated doc by cv and assert iot has been correctly handled +// - Peek the doc by cv and assert it has been found +// - Peek the rev id cache for the same doc and assert that doc also has been updated in that lookup cache +// - Remove the doc by cv, and asser that the doc is gone +func TestRevCacheOperationsCV(t *testing.T) { + 
cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) + + cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} + documentRevision := DocumentRevision{ + DocID: "doc1", + RevID: "1-abc", + BodyBytes: []byte(`{"test":"1234"}`), + Channels: base.SetOf("chan1"), + History: Revisions{"start": 1}, + CV: &cv, + } + cache.Put(base.TestCtx(t), documentRevision) + + docRev, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) + require.NoError(t, err) + assert.Equal(t, "doc1", docRev.DocID) + assert.Equal(t, base.SetOf("chan1"), docRev.Channels) + assert.Equal(t, "test", docRev.CV.SourceID) + assert.Equal(t, uint64(123), docRev.CV.VersionCAS) + assert.Equal(t, int64(1), cacheHitCounter.Value()) + assert.Equal(t, int64(0), cacheMissCounter.Value()) + + documentRevision.BodyBytes = []byte(`{"test":"12345"}`) + + cache.Upsert(base.TestCtx(t), documentRevision) + + docRev, err = cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) + require.NoError(t, err) + assert.Equal(t, "doc1", docRev.DocID) + assert.Equal(t, base.SetOf("chan1"), docRev.Channels) + assert.Equal(t, "test", docRev.CV.SourceID) + assert.Equal(t, uint64(123), docRev.CV.VersionCAS) + assert.Equal(t, []byte(`{"test":"12345"}`), docRev.BodyBytes) + assert.Equal(t, int64(2), cacheHitCounter.Value()) + assert.Equal(t, int64(0), cacheMissCounter.Value()) + + // remove the doc rev from the cache and assert that the document is no longer present in cache + cache.RemoveWithCV("doc1", &cv) + assert.Equal(t, 0, len(cache.cache)) + assert.Equal(t, 0, len(cache.hlvCache)) + assert.Equal(t, 0, cache.lruList.Len()) +} + func BenchmarkRevisionCacheRead(b *testing.B) { base.SetUpBenchmarkLogging(b, 
base.LevelDebug, base.KeyAll) @@ -492,7 +683,7 @@ func BenchmarkRevisionCacheRead(b *testing.B) { // trigger load into cache for i := 0; i < 5000; i++ { - _, _ = cache.Get(ctx, fmt.Sprintf("doc%d", i), "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, _ = cache.GetWithRev(ctx, fmt.Sprintf("doc%d", i), "1-abc", RevCacheOmitBody, RevCacheOmitDelta) } b.ResetTimer() @@ -500,7 +691,147 @@ func BenchmarkRevisionCacheRead(b *testing.B) { // GET the document until test run has completed for pb.Next() { docId := fmt.Sprintf("doc%d", rand.Intn(5000)) - _, _ = cache.Get(ctx, docId, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, _ = cache.GetWithRev(ctx, docId, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) } }) } + +// TestLoaderMismatchInCV: +// - Get doc that is not in cache by CV to trigger a load from bucket +// - Ensure the CV passed into teh GET operation won't match the doc in teh bucket +// - Assert we get error and the value is not loaded into the cache +func TestLoaderMismatchInCV(t *testing.T) { + cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) + + // create cv with incorrect version to the one stored in backing store + cv := CurrentVersionVector{SourceID: "test", VersionCAS: 1234} + + _, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) + require.Error(t, err) + assert.ErrorContains(t, err, "mismatch between specified current version and fetched document current version for doc") + assert.Equal(t, int64(0), cacheHitCounter.Value()) + assert.Equal(t, int64(1), cacheMissCounter.Value()) + assert.Equal(t, 0, cache.lruList.Len()) + assert.Equal(t, 0, len(cache.hlvCache)) + assert.Equal(t, 0, len(cache.cache)) +} + +// TestConcurrentLoadByCVAndRevOnCache: +// - Create 
cache +// - Now perform two concurrent Gets, one by CV and one by revid on a document that doesn't exist in the cache +// - This will trigger two concurrent loads from bucket in the CV code path and revid code path +// - In doing so we will have two processes trying to update lookup maps at the same time and a race condition will appear +// - In doing so will cause us to potentially have two of teh same elements the cache, one with nothing referencing it +// - Assert after both gets are processed, that the cache only has one element in it and that both lookup maps have only one +// element +// - Grab the single element in the list and assert that both maps point to that element in the cache list +func TestConcurrentLoadByCVAndRevOnCache(t *testing.T) { + cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) + + ctx := base.TestCtx(t) + + wg := sync.WaitGroup{} + wg.Add(2) + + cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} + go func() { + _, err := cache.GetWithRev(ctx, "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + require.NoError(t, err) + wg.Done() + }() + + go func() { + _, err := cache.GetWithCV(ctx, "doc1", &cv, RevCacheOmitBody, RevCacheIncludeDelta) + require.NoError(t, err) + wg.Done() + }() + + wg.Wait() + + revElement := cache.cache[IDAndRev{RevID: "1-abc", DocID: "doc1"}] + cvElement := cache.hlvCache[IDandCV{DocID: "doc1", Source: "test", Version: 123}] + assert.Equal(t, 1, cache.lruList.Len()) + assert.Equal(t, 1, len(cache.cache)) + assert.Equal(t, 1, len(cache.hlvCache)) + // grab the single elem in the cache list + cacheElem := cache.lruList.Front() + // assert that both maps point to the same element in cache list + assert.Equal(t, cacheElem, cvElement) + assert.Equal(t, 
cacheElem, revElement) +} + +// TestGetActive: +// - Create db, create a doc on the db +// - Call GetActive pn the rev cache and assert that the rev and cv are correct +func TestGetActive(t *testing.T) { + db, ctx := setupTestDB(t) + defer db.Close(ctx) + collection := GetSingleDatabaseCollectionWithUser(t, db) + + rev1id, doc, err := collection.Put(ctx, "doc", Body{"val": 123}) + require.NoError(t, err) + + expectedCV := CurrentVersionVector{ + SourceID: db.BucketUUID, + VersionCAS: doc.Cas, + } + + // remove the entry form the rev cache to force teh cache to not have the active version in it + collection.revisionCache.RemoveWithCV("doc", &expectedCV) + + // call get active to get teh active version from the bucket + docRev, err := collection.revisionCache.GetActive(base.TestCtx(t), "doc", true) + assert.NoError(t, err) + assert.Equal(t, rev1id, docRev.RevID) + assert.Equal(t, expectedCV, *docRev.CV) +} + +// TestConcurrentPutAndGetOnRevCache: +// - Perform a Get with rev on the cache for a doc not in the cache +// - Concurrently perform a PUT on the cache with doc revision the same as the GET +// - Assert we get consistent cache with only 1 entry in lookup maps and the cache itself +func TestConcurrentPutAndGetOnRevCache(t *testing.T) { + cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} + cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) + + ctx := base.TestCtx(t) + + wg := sync.WaitGroup{} + wg.Add(2) + + cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} + docRev := DocumentRevision{ + DocID: "doc1", + RevID: "1-abc", + BodyBytes: []byte(`{"test":"1234"}`), + Channels: base.SetOf("chan1"), + History: Revisions{"start": 1}, + CV: &cv, + } + + go func() { + _, err := cache.GetWithRev(ctx, "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + 
require.NoError(t, err) + wg.Done() + }() + + go func() { + cache.Put(ctx, docRev) + wg.Done() + }() + + wg.Wait() + + revElement := cache.cache[IDAndRev{RevID: "1-abc", DocID: "doc1"}] + cvElement := cache.hlvCache[IDandCV{DocID: "doc1", Source: "test", Version: 123}] + + assert.Equal(t, 1, cache.lruList.Len()) + assert.Equal(t, 1, len(cache.cache)) + assert.Equal(t, 1, len(cache.hlvCache)) + cacheElem := cache.lruList.Front() + // assert that both maps point to the same element in cache list + assert.Equal(t, cacheElem, cvElement) + assert.Equal(t, cacheElem, revElement) +} diff --git a/db/revision_test.go b/db/revision_test.go index 683e477a4d..5601dd4eda 100644 --- a/db/revision_test.go +++ b/db/revision_test.go @@ -131,7 +131,7 @@ func TestBackupOldRevision(t *testing.T) { // create rev 2 and check backups for both revs rev2ID := "2-abc" - _, _, err = collection.PutExistingRevWithBody(ctx, docID, Body{"test": true, "updated": true}, []string{rev2ID, rev1ID}, true) + _, _, err = collection.PutExistingRevWithBody(ctx, docID, Body{"test": true, "updated": true}, []string{rev2ID, rev1ID}, true, ExistingVersionWithUpdateToHLV) require.NoError(t, err) // now in all cases we'll have rev 1 backed up (for at least 5 minutes) diff --git a/rest/api_test.go b/rest/api_test.go index 377e8b06d8..f5ef62d435 100644 --- a/rest/api_test.go +++ b/rest/api_test.go @@ -2746,6 +2746,97 @@ func TestNullDocHandlingForMutable1xBody(t *testing.T) { assert.Contains(t, err.Error(), "null doc body for doc") } +// TestPutDocUpdateVersionVector: +// - Put a doc and assert that the versions and the source for the hlv is correctly updated +// - Update that doc and assert HLV has also been updated +// - Delete the doc and assert that the HLV has been updated in deletion event +func TestPutDocUpdateVersionVector(t *testing.T) { + rt := NewRestTester(t, nil) + defer rt.Close() + + bucketUUID, err := rt.GetDatabase().Bucket.UUID() + require.NoError(t, err) + + resp := 
rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"key": "value"}`) + RequireStatus(t, resp, http.StatusCreated) + + syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") + assert.NoError(t, err) + uintCAS := base.HexCasToUint64(syncData.Cas) + + assert.Equal(t, bucketUUID, syncData.HLV.SourceID) + assert.Equal(t, uintCAS, syncData.HLV.Version) + assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) + + // Put a new revision of this doc and assert that the version vector SourceID and Version is updated + resp = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1?rev="+syncData.CurrentRev, `{"key1": "value1"}`) + RequireStatus(t, resp, http.StatusCreated) + + syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") + assert.NoError(t, err) + uintCAS = base.HexCasToUint64(syncData.Cas) + + assert.Equal(t, bucketUUID, syncData.HLV.SourceID) + assert.Equal(t, uintCAS, syncData.HLV.Version) + assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) + + // Delete doc and assert that the version vector SourceID and Version is updated + resp = rt.SendAdminRequest(http.MethodDelete, "/{{.keyspace}}/doc1?rev="+syncData.CurrentRev, "") + RequireStatus(t, resp, http.StatusOK) + + syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") + assert.NoError(t, err) + uintCAS = base.HexCasToUint64(syncData.Cas) + + assert.Equal(t, bucketUUID, syncData.HLV.SourceID) + assert.Equal(t, uintCAS, syncData.HLV.Version) + assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) +} + +// TestHLVOnPutWithImportRejection: +// - Put a doc successfully and assert the HLV is updated correctly +// - Put a doc that will be rejected by the custom import filter +// - Assert that the HLV values on the sync data are still correctly updated/preserved +func TestHLVOnPutWithImportRejection(t *testing.T) { + base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport) + 
importFilter := `function (doc) { return doc.type == "mobile"}`
+	rtConfig := RestTesterConfig{
+		DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{
+			AutoImport:   false,
+			ImportFilter: &importFilter,
+		}},
+	}
+	rt := NewRestTester(t, &rtConfig)
+	defer rt.Close()
+
+	bucketUUID, err := rt.GetDatabase().Bucket.UUID()
+	require.NoError(t, err)
+
+	resp := rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"type": "mobile"}`)
+	RequireStatus(t, resp, http.StatusCreated)
+
+	syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1")
+	assert.NoError(t, err)
+	uintCAS := base.HexCasToUint64(syncData.Cas)
+
+	assert.Equal(t, bucketUUID, syncData.HLV.SourceID)
+	assert.Equal(t, uintCAS, syncData.HLV.Version)
+	assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS)
+
+	// Put a doc that will be rejected by the import filter on the attempt to perform on demand import for write
+	resp = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc2", `{"type": "not-mobile"}`)
+	RequireStatus(t, resp, http.StatusCreated)
+
+	// assert that the hlv is correctly updated and intact after the import was cancelled on the doc
+	syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc2")
+	assert.NoError(t, err)
+	uintCAS = base.HexCasToUint64(syncData.Cas)
+
+	assert.Equal(t, bucketUUID, syncData.HLV.SourceID)
+	assert.Equal(t, uintCAS, syncData.HLV.Version)
+	assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS)
+}
+
 func TestTombstoneCompactionAPI(t *testing.T) {
 	rt := NewRestTester(t, nil)
 	defer rt.Close()
diff --git a/rest/attachment_test.go b/rest/attachment_test.go
index 0b480d4127..053139deca 100644
--- a/rest/attachment_test.go
+++ b/rest/attachment_test.go
@@ -1060,6 +1060,7 @@ func TestAttachmentContentType(t *testing.T) {
 }
 
 func TestBasicAttachmentRemoval(t *testing.T) {
+	t.Skip("Disabled pending CBG-3503")
 	rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true})
 	defer rt.Close()
 
@@ 
-2223,6 +2224,7 @@ func TestAttachmentDeleteOnPurge(t *testing.T) { } func TestAttachmentDeleteOnExpiry(t *testing.T) { + t.Skip("Disabled pending CBG-3503") rt := NewRestTester(t, nil) defer rt.Close() @@ -2260,184 +2262,204 @@ func TestAttachmentDeleteOnExpiry(t *testing.T) { } func TestUpdateExistingAttachment(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Add doc1 and doc2 + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) + doc2Version := btc.rt.PutDoc(doc2ID, `{}`) - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + require.NoError(t, btc.rt.WaitForPendingChanges()) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, 
ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) - require.NoError(t, err) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) + + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) + 
require.NoError(t, err) - doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - assert.NoError(t, err) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + doc1, err := btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), doc1ID, db.DocUnmarshalAll) + assert.NoError(t, err) - req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, "attachmentB", string(req.BodyBytes())) + assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + + req := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, "attachmentB", string(req.BodyBytes())) + }) } // TestPushUnknownAttachmentAsStub sets revpos to an older generation, for an attachment that doesn't exist on the server. // Verifies that getAttachment is triggered, and attachment is properly persisted. 
func TestPushUnknownAttachmentAsStub(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() - + } const doc1ID = "doc1" - doc1Version := rt.PutDoc(doc1ID, `{}`) + btcRunner := NewBlipTesterClientRunner(t) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Add doc1 and doc2 + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) + require.NoError(t, btc.rt.WaitForPendingChanges()) - // force attachment into test client's store to validate it's fetched - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - contentType := "text/plain" + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - length, digest, err := btc.saveAttachment(contentType, attachmentAData) - require.NoError(t, err) - // Update doc1, include reference to non-existing attachment with recent revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) - require.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + + // force attachment into test client's store to validate it's fetched + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + contentType := "text/plain" - require.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + length, 
digest, err := btcRunner.saveAttachment(btc.id, contentType, attachmentAData) + require.NoError(t, err) + // Update doc1, include reference to non-existing attachment with recent revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) + require.NoError(t, err) - // verify that attachment exists on document and was persisted - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + require.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + // verify that attachment exists on document and was persisted + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + }) } func TestMinRevPosWorkToAvoidUnnecessaryProveAttachment(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ AllowConflicts: base.BoolPtr(true), }, }, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } - // Push an initial rev with attachment data + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc" - initialVersion := rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) - // Replicate data to client and ensure doc arrives - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, initialVersion) - assert.True(t, found) + btcRunner.Run(func(t *testing.T, 
SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Push a revision with a bunch of history simulating doc updated on mobile device - // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment - proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() - revid, err := btc.PushRevWithHistory(docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Push an initial rev with attachment data + initialVersion := btc.rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Push another bunch of history - _, err = btc.PushRevWithHistory(docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + // Replicate data to client and ensure doc arrives + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, initialVersion) + assert.True(t, found) + + // Push a revision with a bunch of history simulating doc updated on mobile device + // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment + proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() + revid, err := 
btcRunner.PushRevWithHistory(btc.id, docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + + // Push another bunch of history + _, err = btcRunner.PushRevWithHistory(btc.id, docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + }) } func TestAttachmentWithErroneousRevPos(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) - // Create rev 1 with the hello.txt attachment - const docID = "doc" - version := rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Pull rev and attachment down to client - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, version) - assert.True(t, found) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Create rev 1 with the hello.txt attachment + const docID = "doc" + version := btc.rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := 
btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Add an attachment to client - btc.AttachmentsLock().Lock() - btc.Attachments()["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") - btc.AttachmentsLock().Unlock() + // Pull rev and attachment down to client + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, found) - // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment - _, err = btc.PushRevWithHistory(docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) - require.NoError(t, err) + // Add an attachment to client + btcRunner.AttachmentsLock(btc.id).Lock() + btcRunner.Attachments(btc.id)["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") + btcRunner.AttachmentsLock(btc.id).Unlock() - // Ensure message and attachment is pushed up - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment + _, err = btcRunner.PushRevWithHistory(btc.id, docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) + require.NoError(t, err) - // Get the attachment and ensure the data is updated - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) + // Ensure message and attachment is pushed up + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + // Get the attachment and ensure the data is updated + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, 
"goodbye cruel world", string(resp.BodyBytes())) + }) } // CBG-2004: Test that prove attachment over Blip works correctly when receiving a ErrAttachmentNotFound @@ -2578,74 +2600,79 @@ func TestPutInvalidAttachment(t *testing.T) { // validates that proveAttachment isn't being invoked when the attachment is already present and the // digest doesn't change, regardless of revpos. func TestCBLRevposHandling(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) + doc2Version := btc.rt.PutDoc(doc2ID, `{}`) + require.NoError(t, btc.rt.WaitForPendingChanges()) - 
assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - // Update doc1, don't change attachment, use correct revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) - // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. 
- doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) - require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - // Validate attachment exists - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use correct revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) + require.NoError(t, err) - attachmentPushCount := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment - _, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) - require.NoError(t, err) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - // Validate attachment exists and is updated - attResponse = rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. 
+ doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) + require.NoError(t, err) - attachmentPushCountAfter := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + // Validate attachment exists + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + attachmentPushCount := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment + _, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) + require.NoError(t, err) + + // Validate attachment exists and is updated + attResponse = btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + + attachmentPushCountAfter := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + }) } // Helper_Functions diff --git a/rest/blip_api_attachment_test.go b/rest/blip_api_attachment_test.go index 266e580678..2f63d13880 100644 --- a/rest/blip_api_attachment_test.go +++ b/rest/blip_api_attachment_test.go @@ -43,56 +43,63 @@ func TestBlipPushPullV2AttachmentV2Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - 
btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + // given this test is for v2 protocol, skip version vector test + btcRunner.SkipVersionVectorInitialization = true const docID = "doc1" - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + opts := &BlipTesterClientOpts{} + opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - // Update the replicated doc at client along with keeping the same attachment stub. - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - respBody := rt.GetDocVersion(docID, version) + // Create doc revision with attachment on SG. 
+ bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) + // Update the replicated doc at client along with keeping the same attachment stub. 
+ bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } // Test pushing and pulling v2 attachments with v3 client @@ -113,54 +120,59 @@ func TestBlipPushPullV2AttachmentV3Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // Create doc revision with attachment on SG. 
- bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client along with keeping the same attachment stub. - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client along with keeping the same attachment stub. + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + 
}) } // TestBlipProveAttachmentV2 ensures that CBL's proveAttachment for deduplication is working correctly even for v2 attachments which aren't de-duped on the server side. @@ -169,17 +181,6 @@ func TestBlipProveAttachmentV2(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) const ( doc1ID = "doc1" @@ -196,29 +197,45 @@ func TestBlipProveAttachmentV2(t *testing.T) { attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, - // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd. - doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version := rt.PutDoc(doc1ID, doc1Body) - - data, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) - - // create doc2 now that we know the client has the attachment - doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version := rt.PutDoc(doc2ID, doc2Body) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - data, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) - bodyTextExpected = 
fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest)
-	require.JSONEq(t, bodyTextExpected, string(data))
+	btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) {
+		rt := NewRestTester(t, &rtConfig)
+		defer rt.Close()
-	assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value())
-	assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value())
-	assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value())
-	assert.Equal(t, int64(len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value())
+		btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{
+			SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2},
+		})
+		defer btc.Close()
+
+		err := btcRunner.StartPull(btc.id)
+		assert.NoError(t, err)
+
+		// Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies,
+		// CBL will still de-dupe attachments based on digest, so will still try proveAttachment for the 2nd.
+ doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version := btc.rt.PutDoc(doc1ID, doc1Body) + + data, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + // create doc2 now that we know the client has the attachment + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version := btc.rt.PutDoc(doc2ID, doc2Body) + + data, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) + bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) + assert.Equal(t, int64(len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) + }) } // TestBlipProveAttachmentV2Push ensures that CBL's attachment deduplication is ignored for push replications - resulting in new server-side digests and duplicated attachment data (v2 attachment format). 
@@ -227,15 +244,6 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - const ( doc1ID = "doc1" doc2ID = "doc2" @@ -251,26 +259,38 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { // attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. - doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version, err := btc.PushRev(doc1ID, EmptyDocVersion(), []byte(doc1Body)) - require.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - err = rt.WaitForVersion(doc1ID, doc1Version) - require.NoError(t, err) - - // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
- doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version, err := btc.PushRev(doc2ID, EmptyDocVersion(), []byte(doc2Body)) - require.NoError(t, err) - - err = rt.WaitForVersion(doc2ID, doc2Version) - require.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) - assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(2*len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer btc.Close() + // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. + doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version, err := btcRunner.PushRev(btc.id, doc1ID, EmptyDocVersion(), []byte(doc1Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc1ID, doc1Version) + require.NoError(t, err) + + // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
+ doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version, err := btcRunner.PushRev(btc.id, doc2ID, EmptyDocVersion(), []byte(doc2Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc2ID, doc2Version) + require.NoError(t, err) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(2*len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { @@ -278,130 +298,139 @@ func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. 
- bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "2-abc", revId) - - // Wait for the documents to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, - // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err = btc.PushRevWithHistory(docID, revId, []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(4) - assert.True(t, ok) - - resp = rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - 
require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "2-abc", revId) + + // Wait for the documents to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, + // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 + bodyText = 
`{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err = btcRunner.PushRevWithHistory(btc.id, docID, revId, []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(4) + assert.True(t, ok) + + resp = btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. 
+ require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } func TestBlipPushPullNewAttachmentNoCommonAncestor(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) const docID = "doc1" - - // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. - // rev tree pruning on the CBL side, so 1-abc no longer exists. - // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 - bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "2-abc", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": 
"alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(4), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. + // rev tree pruning on the CBL side, so 1-abc no longer exists. 
+ // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "2-abc", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(4), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. 
+ require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } // Test Attachment replication behavior described here: https://github.com/couchbase/couchbase-lite-core/wiki/Replication-Protocol @@ -507,163 +536,181 @@ func TestPutAttachmentViaBlipGetViaBlip(t *testing.T) { // TestBlipAttachNameChange tests CBL handling - attachments with changed names are sent as stubs, and not new attachments func TestBlipAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - attachmentA := []byte("attachmentA") - attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) - digest := db.Sha1DigestKey(attachmentA) - - // Push initial attachment data - version, err := client1.PushRev("doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) - require.NoError(t, err) - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attachmentA) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - version, err = client1.PushRev("doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - err = 
rt.WaitForVersion("doc", version) - require.NoError(t, err) - - // Check if attachment is still in bucket - bucketAttachmentA, _, err = rt.GetSingleDataStore().GetRaw(attachmentAKey) - assert.NoError(t, err) - assert.Equal(t, bucketAttachmentA, attachmentA) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attachmentA, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + + attachmentA := []byte("attachmentA") + attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) + digest := db.Sha1DigestKey(attachmentA) + + // Push initial attachment data + version, err := btcRunner.PushRev(client1.id, "doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) + require.NoError(t, err) + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attachmentA) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + err = client1.rt.WaitForVersion("doc", version) + require.NoError(t, err) + + // Check if attachment is still in bucket + bucketAttachmentA, _, err = client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + assert.NoError(t, err) + assert.Equal(t, 
bucketAttachmentA, attachmentA) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attachmentA, resp.BodyBytes()) + }) } // TestBlipLegacyAttachNameChange ensures that CBL name changes for legacy attachments are handled correctly func TestBlipLegacyAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - // Create document in the bucket with a legacy attachment - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - // Get the document and grab the revID. 
- docVersion, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, docVersion.RevID, rawDoc) - - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - docVersion, err = client1.PushRev("doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", docVersion) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + // Get the document and grab the revID. 
+ docVersion, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, docVersion.RevID, rawDoc) + + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + docVersion, err = btcRunner.PushRev(client1.id, "doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", docVersion) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + }) } // TestBlipLegacyAttachDocUpdate ensures that CBL updates for documents associated with legacy attachments are handled correctly func TestBlipLegacyAttachDocUpdate(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - - // Create document in the bucket with a legacy attachment. 
Properties here align with rawDocWithAttachmentAndSyncMeta - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - attName := "hi.txt" - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - version, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, version.RevID, rawDoc) - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - dataStore := rt.GetSingleDataStore() - bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Update the document, leaving body intact - version, err = client1.PushRev("doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", version) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) - - // Validate that the attachment hasn't been migrated to V2 - v1Key := db.MakeAttachmentKey(1, "doc", digest) - v1Body, _, err := dataStore.GetRaw(v1Key) - require.NoError(t, err) - require.EqualValues(t, attBody, v1Body) - - v2Key := db.MakeAttachmentKey(2, "doc", digest) - _, _, err = dataStore.GetRaw(v2Key) - require.Error(t, err) - // Confirm correct type of error for both integration test and Walrus - if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { - var keyValueErr *gocb.KeyValueError - 
require.True(t, errors.As(err, &keyValueErr)) - //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) - require.Equal(t, keyValueErr.DocumentID, v2Key) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, } + + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment. Properties here align with rawDocWithAttachmentAndSyncMeta + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + attName := "hi.txt" + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + version, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, version.RevID, rawDoc) + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + dataStore := client1.rt.GetSingleDataStore() + bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Update the document, leaving body intact + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", 
version) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + + // Validate that the attachment hasn't been migrated to V2 + v1Key := db.MakeAttachmentKey(1, "doc", digest) + v1Body, _, err := dataStore.GetRaw(v1Key) + require.NoError(t, err) + require.EqualValues(t, attBody, v1Body) + + v2Key := db.MakeAttachmentKey(2, "doc", digest) + _, _, err = dataStore.GetRaw(v2Key) + require.Error(t, err) + // Confirm correct type of error for both integration test and Walrus + if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { + var keyValueErr *gocb.KeyValueError + require.True(t, errors.As(err, &keyValueErr)) + //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) + require.Equal(t, keyValueErr.DocumentID, v2Key) + } + }) } // TestAttachmentComputeStat: @@ -676,31 +723,33 @@ func TestAttachmentComputeStat(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Create doc revision with attachment on SG. 
- bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() - // Wait for the document to be replicated to client. - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - // assert the attachment read compute stat is incremented - require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + // Wait for the document to be replicated to client. 
+ data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // assert the attachment read compute stat is incremented + require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + }) } diff --git a/rest/blip_api_collections_test.go b/rest/blip_api_collections_test.go index 7839daa4ee..5663e1227e 100644 --- a/rest/blip_api_collections_test.go +++ b/rest/blip_api_collections_test.go @@ -28,322 +28,344 @@ func TestBlipGetCollections(t *testing.T) { // checkpointIDWithError := "checkpointError" const defaultScopeAndCollection = "_default._default" - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{GuestEnabled: true}, 1) - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, - &BlipTesterClientOpts{ + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 1) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ SkipCollectionsInitialization: true, - }, - ) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - testCases := []struct { - name string - requestBody db.GetCollectionsRequestBody - resultBody []db.Body - errorCode string - }{ - { - 
name: "noDocInDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{defaultScopeAndCollection}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + testCases := []struct { + name string + requestBody db.GetCollectionsRequestBody + resultBody []db.Body + errorCode string + }{ + { + name: "noDocInDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "mismatchedLengthOnInput", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id", "id2"}, - Collections: []string{defaultScopeAndCollection}, + { + name: "mismatchedLengthOnInput", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id", "id2"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "inDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{defaultScopeAndCollection}, + { + name: "inDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + 
errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "badScopeSpecificationEmptyString", - // bad scope specification - empty string - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{""}, + { + name: "badScopeSpecificationEmptyString", + // bad scope specification - empty string + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{""}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "presentNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{scopeAndCollection}, + { + name: "presentNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{checkpoint1Body}, + errorCode: "", }, - resultBody: []db.Body{checkpoint1Body}, - errorCode: "", - }, - { - name: "unseenInNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{scopeAndCollection}, + { + name: "unseenInNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{db.Body{}}, + errorCode: "", }, - resultBody: []db.Body{db.Body{}}, - errorCode: "", - }, - // { - // name: "checkpointExistsWithErrorInNonDefaultCollection", - // requestBody: db.GetCollectionsRequestBody{ - // CheckpointIDs: []string{checkpointIDWithError}, - // Collections: []string{scopeAndCollection}, - // }, - // resultBody: []db.Body{nil}, - // errorCode: "", - // }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - 
getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the norev handler - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) - require.Equal(t, errorCode, testCase.errorCode) - if testCase.errorCode != "" { - return - } - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) + // { + // name: "checkpointExistsWithErrorInNonDefaultCollection", + // requestBody: db.GetCollectionsRequestBody{ + // CheckpointIDs: []string{checkpointIDWithError}, + // Collections: []string{scopeAndCollection}, + // }, + // resultBody: []db.Body{nil}, + // errorCode: "", + // }, + } - require.Equal(t, testCase.resultBody, checkpoints) - }) - } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the norev handler + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) + require.Equal(t, errorCode, testCase.errorCode) + if testCase.errorCode != "" { + return + } + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + + require.Equal(t, testCase.resultBody, checkpoints) + }) + } + }) } func 
TestBlipReplicationNoDefaultCollection(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + + subChangesRequest := blip.NewRequest() + subChangesRequest.SetProfile(db.MessageSubChanges) + + require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) + resp := subChangesRequest.Response() + require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - - subChangesRequest := blip.NewRequest() - subChangesRequest.SetProfile(db.MessageSubChanges) - - require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) - resp := subChangesRequest.Response() - require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) } func TestBlipGetCollectionsAndSetCheckpoint(t *testing.T) { 
base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, - }) - - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the GetCollections - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.False(t, hasErrorCode) - require.Equal(t, errorCode, "") - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) - require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) - - // make sure other functions get called - - requestGetCheckpoint := blip.NewRequest() - requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) - requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 - requestGetCheckpoint.Properties[db.BlipCollection] = "0" - require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) - resp = requestGetCheckpoint.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] - require.Equal(t, errorCode, "") - require.False(t, hasErrorCode) - var checkpoint db.Body - err = resp.ReadJSONBody(&checkpoint) - 
require.NoErrorf(t, err, "Actual error %+v", checkpoint) - - require.Equal(t, db.Body{"seq": "123"}, checkpoint) + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, + }) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the GetCollections + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.False(t, hasErrorCode) + require.Equal(t, errorCode, "") + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) + + // make sure other functions get called + + requestGetCheckpoint := blip.NewRequest() + requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) + requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 + requestGetCheckpoint.Properties[db.BlipCollection] = "0" + require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) + resp = requestGetCheckpoint.Response() + require.NotNil(t, resp) + 
errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] + require.Equal(t, errorCode, "") + require.False(t, hasErrorCode) + var checkpoint db.Body + err = resp.ReadJSONBody(&checkpoint) + require.NoErrorf(t, err, "Actual error %+v", checkpoint) + + require.Equal(t, db.Body{"seq": "123"}, checkpoint) + }) } func TestCollectionsReplication(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - const docID = "doc1" - version := rt.PutDoc(docID, "{}") - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + version := btc.rt.PutDoc(docID, "{}") + require.NoError(t, btc.rt.WaitForPendingChanges()) - btcCollection := btc.SingleCollection() + btcCollection := btcRunner.SingleCollection(btc.id) - err = btcCollection.StartOneshotPull() - require.NoError(t, err) + err := btcCollection.StartOneshotPull() + require.NoError(t, err) - _, ok := btcCollection.WaitForVersion(docID, version) - require.True(t, ok) + _, ok := btcCollection.WaitForVersion(docID, version) + require.True(t, ok) + }) } func TestBlipReplicationMultipleCollections(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) - docName := "doc1" - body := `{"foo":"bar"}` - versions := 
make([]DocVersion, 0, len(rt.GetKeyspaces())) - for _, keyspace := range rt.GetKeyspaces() { - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) - RequireStatus(t, resp, http.StatusCreated) - versions = append(versions, DocVersionFromPutResponse(t, resp)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() - } - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartPull()) - } + docName := "doc1" + body := `{"foo":"bar"}` + versions := make([]DocVersion, 0, len(btc.rt.GetKeyspaces())) + for _, keyspace := range btc.rt.GetKeyspaces() { + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) + RequireStatus(t, resp, http.StatusCreated) + versions = append(versions, DocVersionFromPutResponse(t, resp)) + } + require.NoError(t, btc.rt.WaitForPendingChanges()) - for i, collectionClient := range btc.collectionClients { - msg, ok := collectionClient.WaitForVersion(docName, versions[i]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartPull()) + } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for i, collectionClient := range btc.collectionClients { + msg, ok := collectionClient.WaitForVersion(docName, versions[i]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := 
collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } func TestBlipReplicationMultipleCollectionsMismatchedDocSizes(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - body := `{"foo":"bar"}` - collectionDocIDs := make(map[string][]string) - collectionVersions := make(map[string][]DocVersion) - require.Len(t, rt.GetKeyspaces(), 2) - for i, keyspace := range rt.GetKeyspaces() { - // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one - docCount := 10 - if i == 0 { - docCount = 1 + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + body := `{"foo":"bar"}` + collectionDocIDs := make(map[string][]string) + collectionVersions := make(map[string][]DocVersion) + require.Len(t, btc.rt.GetKeyspaces(), 2) + for i, keyspace := range btc.rt.GetKeyspaces() { + // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one + docCount := 10 + if i == 0 { + docCount = 1 + } + blipName := btc.rt.getCollectionsForBLIP()[i] + for j := 0; j < docCount; j++ { + docName := fmt.Sprintf("doc%d", j) + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) + RequireStatus(t, resp, http.StatusCreated) + + version := DocVersionFromPutResponse(t, resp) + collectionVersions[blipName] = append(collectionVersions[blipName], version) + collectionDocIDs[blipName] = 
append(collectionDocIDs[blipName], docName) + } } - blipName := rt.getCollectionsForBLIP()[i] - for j := 0; j < docCount; j++ { - docName := fmt.Sprintf("doc%d", j) - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) - RequireStatus(t, resp, http.StatusCreated) + require.NoError(t, btc.rt.WaitForPendingChanges()) - version := DocVersionFromPutResponse(t, resp) - collectionVersions[blipName] = append(collectionVersions[blipName], version) - collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartOneshotPull()) } - } - require.NoError(t, rt.WaitForPendingChanges()) - - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartOneshotPull()) - } - - for _, collectionClient := range btc.collectionClients { - versions := collectionVersions[collectionClient.collection] - docIDs := collectionDocIDs[collectionClient.collection] - msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for _, collectionClient := range btc.collectionClients { + versions := collectionVersions[collectionClient.collection] + docIDs := collectionDocIDs[collectionClient.collection] + msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } diff --git a/rest/blip_api_crud_test.go b/rest/blip_api_crud_test.go index 
7c041f7cb5..e0b67f1aad 100644 --- a/rest/blip_api_crud_test.go +++ b/rest/blip_api_crud_test.go @@ -1152,53 +1152,8 @@ function(doc, oldDoc) { // Test send and retrieval of a doc. // -// Validate deleted handling (includes check for https://github.com/couchbase/sync_gateway/issues/3341) -func TestBlipSendAndGetRev(t *testing.T) { - - base.SetUpTestLogging(t, base.LevelInfo, base.KeyHTTP, base.KeySync, base.KeySyncMsg) - - rt := NewRestTester(t, nil) - defer rt.Close() - btSpec := BlipTesterSpec{ - connectingUsername: "user1", - connectingPassword: "1234", - } - bt, err := NewBlipTesterFromSpecWithRT(t, &btSpec, rt) - require.NoError(t, err, "Unexpected error creating BlipTester") - defer bt.Close() - - // Send non-deleted rev - sent, _, resp, err := bt.SendRev("sendAndGetRev", "1-abc", []byte(`{"key": "val", "channels": ["user1"]}`), blip.Properties{}) - assert.True(t, sent) - assert.NoError(t, err) - assert.Equal(t, "", resp.Properties["Error-Code"]) - - // Get non-deleted rev - response := bt.restTester.SendAdminRequest("GET", "/{{.keyspace}}/sendAndGetRev?rev=1-abc", "") - RequireStatus(t, response, 200) - var responseBody RestDocument - assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &responseBody), "Error unmarshalling GET doc response") - _, ok := responseBody[db.BodyDeleted] - assert.False(t, ok) - - // Tombstone the document - history := []string{"1-abc"} - sent, _, resp, err = bt.SendRevWithHistory("sendAndGetRev", "2-bcd", history, []byte(`{"key": "val", "channels": ["user1"]}`), blip.Properties{"deleted": "true"}) - assert.True(t, sent) - assert.NoError(t, err) - assert.Equal(t, "", resp.Properties["Error-Code"]) - - // Get the tombstoned document - response = bt.restTester.SendAdminRequest("GET", "/{{.keyspace}}/sendAndGetRev?rev=2-bcd", "") - RequireStatus(t, response, 200) - responseBody = RestDocument{} - assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &responseBody), "Error unmarshalling GET doc response") - deletedValue, 
deletedOK := responseBody[db.BodyDeleted].(bool) - assert.True(t, deletedOK) - assert.True(t, deletedValue) -} - -// Test send and retrieval of a doc with a large numeric value. Ensure proper large number handling. +// err := btc.StartPull() +// require.NoError(t, err) // // Validate deleted handling (includes check for https://github.com/couchbase/sync_gateway/issues/3341) func TestBlipSendAndGetLargeNumberRev(t *testing.T) { @@ -1774,381 +1729,11 @@ func TestGetRemovedDoc(t *testing.T) { // Reproduce issue SG #3738 // -// - Add 5 docs to channel ABC -// - Purge one doc via _purge REST API -// - Flush rev cache -// - Send subChanges request -// - Reply to all changes saying all docs are wanted -// - Wait to receive rev messages for all 5 docs -// - Expected: receive all 5 docs (4 revs and 1 norev) -// - Actual: only receive 4 docs (4 revs) -func TestMissingNoRev(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() - ctx := rt.Context() - - bt, err := NewBlipTesterFromSpecWithRT(t, nil, rt) - require.NoError(t, err, "Unexpected error creating BlipTester") - defer bt.Close() - - require.NoError(t, rt.WaitForDBOnline()) - - // Create 5 docs - for i := 0; i < 5; i++ { - docID := fmt.Sprintf("doc-%d", i) - docRev := fmt.Sprintf("1-abc%d", i) - sent, _, resp, err := bt.SendRev(docID, docRev, []byte(`{"key": "val", "channels": ["ABC"]}`), blip.Properties{}) - assert.True(t, sent) - require.NoError(t, err, "resp is %s", resp) - } - - // Pull docs, expect to pull 5 docs since none of them has purged yet. - docs, ok := bt.WaitForNumDocsViaChanges(5) - require.True(t, ok) - assert.Len(t, docs, 5) - - // Purge one doc - doc0Id := fmt.Sprintf("doc-%d", 0) - err = rt.GetSingleTestDatabaseCollectionWithUser().Purge(ctx, doc0Id) - assert.NoError(t, err, "failed") - - // Flush rev cache - rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() - - // Pull docs, expect to pull 4 since one was purged. 
(also expect to NOT get stuck) - docs, ok = bt.WaitForNumDocsViaChanges(4) - assert.True(t, ok) - assert.Len(t, docs, 4) - -} - -// TestBlipPullRevMessageHistory tests that a simple pull replication contains history in the rev message. -func TestBlipPullRevMessageHistory(t *testing.T) { - - base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - - sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - DeltaSync: &DeltaSyncConfig{ - Enabled: &sgUseDeltas, - }, - }}, - GuestEnabled: true, - } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true - - err = client.StartPull() - assert.NoError(t, err) - - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = client.WaitForVersion(docID, version2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version -} - -// Reproduces CBG-617 (a client using activeOnly for the initial replication, and then still expecting to get subsequent tombstones afterwards) -func TestActiveOnlyContinuous(t *testing.T) { - - base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - - rt := NewRestTester(t, 
&RestTesterConfig{GuestEnabled: true}) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - const docID = "doc1" - version := rt.PutDoc(docID, `{"test":true}`) - - // start an initial pull - require.NoError(t, btc.StartPullSince("true", "0", "true")) - rev, found := btc.WaitForVersion(docID, version) - assert.True(t, found) - assert.Equal(t, `{"test":true}`, string(rev)) - - // delete the doc and make sure the client still gets the tombstone replicated - deletedVersion := rt.DeleteDocReturnVersion(docID, version) - - rev, found = btc.WaitForVersion(docID, deletedVersion) - assert.True(t, found) - assert.Equal(t, `{}`, string(rev)) -} - -// Test that exercises Sync Gateway's norev handler -func TestBlipNorev(t *testing.T) { - - base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - norevMsg := db.NewNoRevMessage() - norevMsg.SetId("docid") - norevMsg.SetRev("1-a") - norevMsg.SetSequence(db.SequenceID{Seq: 50}) - norevMsg.SetError("404") - norevMsg.SetReason("couldn't send xyz") - btc.addCollectionProperty(norevMsg.Message) - - // Couchbase Lite always sends noreply=true for norev messages - // but set to false so we can block waiting for a reply - norevMsg.SetNoReply(false) - - // Request that the handler used to process the message is sent back in the response - norevMsg.Properties[db.SGShowHandler] = "true" - - assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) - - // Check that the response we got back was processed by the norev handler - resp := norevMsg.Response() - assert.NotNil(t, resp) - assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) -} - -// TestNoRevSetSeq makes sure the correct string is used with the corresponding function -func TestNoRevSetSeq(t *testing.T) { - 
norevMsg := db.NewNoRevMessage() - assert.Equal(t, "", norevMsg.Properties[db.NorevMessageSeq]) - assert.Equal(t, "", norevMsg.Properties[db.NorevMessageSequence]) - - norevMsg.SetSequence(db.SequenceID{Seq: 50}) - assert.Equal(t, "50", norevMsg.Properties[db.NorevMessageSequence]) - - norevMsg.SetSeq(db.SequenceID{Seq: 60}) - assert.Equal(t, "60", norevMsg.Properties[db.NorevMessageSeq]) - -} - -func TestRemovedMessageWithAlternateAccess(t *testing.T) { - defer db.SuspendSequenceBatching()() - base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() - - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - - changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - - version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, 
changes.Results[0].Changes[0]) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docID, version) - assert.True(t, ok) - - version = rt.UpdateDoc(docID, version, `{"channels": []}`) - const docMarker = "docmarker" - docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) - - changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) - assert.False(t, changes.Results[0].Revoked) - assert.Equal(t, "docmarker", changes.Results[1].ID) - RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) - assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) - assert.False(t, changes.Results[1].Revoked) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docMarker, docMarkerVersion) - assert.True(t, ok) - - messages := btc.pullReplication.GetMessages() - - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message - } - } - } - - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 3) - require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A - require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B - 
require.Len(t, messageBody[2], 3) - - deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - id := messageBody[0].([]interface{})[1] - require.NoError(t, err) - assert.Equal(t, "doc", id) - assert.Equal(t, int64(4), deletedFlags) -} - -// TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication tests the following scenario: -// User has access to channel A and B -// Document rev 1 is in A and B -// Document rev 2 is in channel C -// Document rev 3 is in channel B -// User issues changes requests with since=0 for channel A -// Revocation should not be issued because the user currently has access to channel B, even though they didn't -// have access to the removal revision (rev 2). CBG-2277 - -func TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication(t *testing.T) { - defer db.SuspendSequenceBatching()() - base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() - - const ( - docID = "doc" - ) - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - - changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - - version 
= rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) - require.NoError(t, rt.WaitForPendingChanges()) - // At this point changes should send revocation, as document isn't in any of the user's channels - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docID, version) - assert.True(t, ok) - - _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - markerID := "docmarker" - markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) - require.NoError(t, rt.WaitForPendingChanges()) - - // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API - assert.Equal(t, "doc", changes.Results[0].ID) - assert.Equal(t, markerID, changes.Results[1].ID) - - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(markerID, markerVersion) - assert.True(t, ok) - - messages := btc.pullReplication.GetMessages() - - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = 
message - } - } - } - - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 1) - require.Len(t, messageBody[0], 3) // marker doc - require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) -} - -// Make sure that a client cannot open multiple subChanges subscriptions on a single blip context (SG #3222) -// - Open a one-off subChanges request, ensure it works. -// - Open a subsequent continuous request, and ensure it works. -// - Open another continuous subChanges, and asserts that it gets an error on the 2nd one, because the first is still running. -// - Open another one-off subChanges request, assert we still get an error. +// btc.Run(func(t *testing.T) { +// // Confirm no error message or panic is returned in response +// response, err := btc.UnsubPullChanges() +// assert.NoError(t, err) +// assert.Empty(t, response) // // Asserts on stats to test for regression of CBG-1824: Make sure SubChangesOneShotActive gets decremented when one shot // sub changes request has completed @@ -2362,53 +1947,57 @@ func TestBlipInternalPropertiesHandling(t *testing.T) { } // Setup - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Track last sequence for next changes feed - var changes ChangesResults - changes.Last_Seq = "0" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - for i, test := range testCases { - t.Run(test.name, func(t *testing.T) { - docID := fmt.Sprintf("test%d", i) - rawBody, err := 
json.Marshal(test.inputBody) - require.NoError(t, err) + // Track last sequence for next changes feed + var changes ChangesResults + changes.Last_Seq = "0" - _, err = client.PushRev(docID, EmptyDocVersion(), rawBody) + for i, test := range testCases { + t.Run(test.name, func(t *testing.T) { + docID := fmt.Sprintf("test%d", i) + rawBody, err := json.Marshal(test.inputBody) + require.NoError(t, err) - if test.expectReject { - assert.Error(t, err) - return - } - assert.NoError(t, err) - - // Wait for rev to be received on RT - err = rt.WaitForPendingChanges() - require.NoError(t, err) - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) - require.NoError(t, err) - - var bucketDoc map[string]interface{} - _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) - assert.NoError(t, err) - body := rt.GetDocBody(docID) - // Confirm input body is in the bucket doc - if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { - for k, v := range test.inputBody { - assert.Equal(t, v, bucketDoc[k]) - assert.Equal(t, v, body[k]) + _, err = btcRunner.PushRev(client.id, docID, EmptyDocVersion(), rawBody) + + if test.expectReject { + assert.Error(t, err) + return } - } - }) - } + assert.NoError(t, err) + + // Wait for rev to be received on RT + err = client.rt.WaitForPendingChanges() + require.NoError(t, err) + changes, err = client.rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) + require.NoError(t, err) + + var bucketDoc map[string]interface{} + _, err = client.rt.GetSingleDataStore().Get(docID, &bucketDoc) + assert.NoError(t, err) + body := client.rt.GetDocBody(docID) + // Confirm input body is in the bucket doc + if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { + for k, v := range test.inputBody { + assert.Equal(t, v, bucketDoc[k]) + assert.Equal(t, v, body[k]) + } + } + }) + } + }) } // CBG-2053: Test that the 
handleRev stats still increment correctly when going through the processRev function with @@ -2528,133 +2117,144 @@ func TestSendRevisionNoRevHandling(t *testing.T) { if !base.UnitTestUrlIsWalrus() { t.Skip("Skip LeakyBucket test when running in integration") } - testCases := []struct { - error error - expectNoRev bool - }{ - { - error: gocb.ErrDocumentNotFound, - expectNoRev: true, - }, - { - error: gocb.ErrOverload, - expectNoRev: false, - }, - } - for _, test := range testCases { - t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { - docName := fmt.Sprintf("%s", test.error) - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), - }) - defer rt.Close() - - leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) - require.True(t, ok) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - // Change noRev handler so it's known when a noRev is received - recievedNoRevs := make(chan *blip.Message) - btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { - fmt.Println("Received noRev", msg.Properties) - recievedNoRevs <- msg - } - - version := rt.PutDoc(docName, `{"foo":"bar"}`) - - // Make the LeakyBucket return an error - leakyDataStore.SetGetRawCallback(func(key string) error { - return test.error - }) - leakyDataStore.SetGetWithXattrCallback(func(key string) error { - return test.error - }) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), + } + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // test is for norev handling, this will be different in version vector subprotocol + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := 
&BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + leakyDataStore, ok := base.AsLeakyDataStore(btc.rt.Bucket().DefaultDataStore()) + require.True(t, ok) + + testCases := []struct { + error error + expectNoRev bool + }{ + { + error: gocb.ErrDocumentNotFound, + expectNoRev: true, + }, + { + error: gocb.ErrOverload, + expectNoRev: false, + }, + } + for _, test := range testCases { + t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { + docName := fmt.Sprintf("%s", test.error) + + // Change noRev handler so it's known when a noRev is received + recievedNoRevs := make(chan *blip.Message) + btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { + fmt.Println("Received noRev", msg.Properties) + recievedNoRevs <- msg + } - // Flush cache so document has to be retrieved from the leaky bucket - rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + version := btc.rt.PutDoc(docName, `{"foo":"bar"}`) - err = btc.StartPull() - require.NoError(t, err) + // Make the LeakyBucket return an error + leakyDataStore.SetGetRawCallback(func(key string) error { + return test.error + }) + leakyDataStore.SetGetWithXattrCallback(func(key string) error { + return test.error + }) - // Wait 3 seconds for noRev to be received - select { - case msg := <-recievedNoRevs: - if test.expectNoRev { - assert.Equal(t, docName, msg.Properties["id"]) - } else { - require.Fail(t, "Received unexpected noRev message", msg) + // Flush cache so document has to be retrieved from the leaky bucket + btc.rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + + err := btcRunner.StartPull(btc.id) + require.NoError(t, err) + + // Wait 3 seconds for noRev to be received + select { + case msg := <-recievedNoRevs: + if test.expectNoRev { + assert.Equal(t, docName, msg.Properties["id"]) + } else { + require.Fail(t, "Received unexpected noRev message", 
msg) + } + case <-time.After(3 * time.Second): + if test.expectNoRev { + require.Fail(t, "Didn't receive expected noRev") + } } - case <-time.After(3 * time.Second): - if test.expectNoRev { - require.Fail(t, "Didn't receive expected noRev") - } - } - // Make sure document did not get replicated - _, found := btc.GetVersion(docName, version) - assert.False(t, found) - }) - } + // Make sure document did not get replicated + _, found := btcRunner.GetVersion(btc.id, docName, version) + assert.False(t, found) + }) + } + }) } func TestUnsubChanges(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - // Confirm no error message or panic is returned in response - response, err := btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) - - // Sub changes - err = btc.StartPull() - require.NoError(t, err) + rtConfig := &RestTesterConfig{GuestEnabled: true} const ( doc1ID = "doc1ID" doc2ID = "doc2ID" ) - doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) - _, found := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, found) + btcRunner := NewBlipTesterClientRunner(t) - activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous - require.EqualValues(t, 1, activeReplStat.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Unsub changes - response, err = btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) - // Wait for unsub changes to stop the sub changes being sent before sending document up - base.RequireWaitForStat(t, activeReplStat.Value, 0) - - // Confirm no more changes are being sent - doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) - err = rt.WaitForConditionWithOptions(func() bool { - _, found = 
btc.GetVersion("doc2", doc2Version) - return found - }, 10, 100) - assert.Error(t, err) - - // Confirm no error message is still returned when no subchanges active - response, err = btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Confirm the pull replication can be restarted and it syncs doc2 - err = btc.StartPull() - require.NoError(t, err) - _, found = btc.WaitForVersion(doc2ID, doc2Version) - assert.True(t, found) + // Confirm no error message or panic is returned in response + response, err := btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + + // Sub changes + err = btcRunner.StartPull(btc.id) + require.NoError(t, err) + doc1Version := btc.rt.PutDoc(doc1ID, `{"key":"val1"}`) + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, found) + + activeReplStat := btc.rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous + require.EqualValues(t, 1, activeReplStat.Value()) + + // Unsub changes + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + // Wait for unsub changes to stop the sub changes being sent before sending document up + base.RequireWaitForStat(t, activeReplStat.Value, 0) + + // Confirm no more changes are being sent + doc2Version := btc.rt.PutDoc(doc2ID, `{"key":"val1"}`) + err = btc.rt.WaitForConditionWithOptions(func() bool { + _, found = btcRunner.GetVersion(btc.id, "doc2", doc2Version) + return found + }, 10, 100) + assert.Error(t, err) + + // Confirm no error message is still returned when no subchanges active + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + + // Confirm the pull replication can be restarted and it syncs doc2 + err = 
btcRunner.StartPull(btc.id) + require.NoError(t, err) + _, found = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + assert.True(t, found) + }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the replication. @@ -2671,47 +2271,48 @@ func TestRequestPlusPull(t *testing.T) { } }`, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() - - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() + database := client.rt.GetDatabase() + // Put a doc in channel PBS + response := client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - // Write a document granting user 'bernard' access to PBS - response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - 
RequireStatus(t, response, 201) + // Write a document granting user 'bernard' access to PBS + response = client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Start a regular one-shot pull - err = client.StartOneshotPullRequestPlus() - assert.NoError(t, err) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPullRequestPlus(client.id) + assert.NoError(t, err) - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) - - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the db config. 
@@ -2733,47 +2334,48 @@ func TestRequestPlusPullDbConfig(t *testing.T) { }, }, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() - - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() + database := client.rt.GetDatabase() + // Put a doc in channel PBS + response := client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - // Write a document granting user 'bernard' access to PBS - response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Write a document granting user 'bernard' access to PBS + response = client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - caughtUpStart := 
database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Start a regular one-shot pull - err = client.StartOneshotPull() - assert.NoError(t, err) - - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPull(client.id) + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } // TestBlipRefreshUser makes sure there is no panic if a user gets deleted during a replication @@ -2794,53 +2396,55 @@ func TestBlipRefreshUser(t *testing.T) { rtConfig := RestTesterConfig{ SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - const username = "bernard" - // Initialize blip tester client (will create user) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - Channels: []string{"chan1"}, - }) - - require.NoError(t, err) - defer btc.Close() + btcRunner := 
NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + // Initialize blip tester client (will create user) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ // This test will need refactoring when its getting fixed in CBG-3512 + Username: username, + Channels: []string{"chan1"}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // add chan1 explicitly - response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) - RequireStatus(t, response, http.StatusOK) + // add chan1 explicitly + response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) + RequireStatus(t, response, http.StatusOK) - const docID = "doc1" - version := rt.PutDoc(docID, `{"channels":["chan1"]}`) + const docID = "doc1" + version := rt.PutDoc(docID, `{"channels":["chan1"]}`) - // Start a regular one-shot pull - err = btc.StartPullSince("true", "0", "false") - require.NoError(t, err) + // Start a regular one-shot pull + err := btcRunner.StartPullSince(btc.id, "true", "0", "false") + require.NoError(t, err) - _, ok := btc.WaitForDoc(docID) - require.True(t, ok) + _, ok := btcRunner.WaitForDoc(btc.id, docID) + require.True(t, ok) - _, ok = btc.GetVersion(docID, version) - require.True(t, ok) + _, ok = btcRunner.GetVersion(btc.id, docID, version) + require.True(t, ok) - // delete user with an active blip connection - response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") - RequireStatus(t, response, http.StatusOK) + // delete user with an active blip connection + response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") + 
RequireStatus(t, response, http.StatusOK) - require.NoError(t, rt.WaitForPendingChanges()) + require.NoError(t, rt.WaitForPendingChanges()) - // further requests will 500, but shouldn't panic - unsubChangesRequest := blip.NewRequest() - unsubChangesRequest.SetProfile(db.MessageUnsubChanges) - btc.addCollectionProperty(unsubChangesRequest) + // further requests will 500, but shouldn't panic + unsubChangesRequest := blip.NewRequest() + unsubChangesRequest.SetProfile(db.MessageUnsubChanges) + btc.addCollectionProperty(unsubChangesRequest) - err = btc.pullReplication.sendMsg(unsubChangesRequest) - require.NoError(t, err) + err = btc.pullReplication.sendMsg(unsubChangesRequest) + require.NoError(t, err) - testResponse := unsubChangesRequest.Response() - require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) - body, err := testResponse.Body() - require.NoError(t, err) - require.NotContains(t, string(body), "Panic:") + testResponse := unsubChangesRequest.Response() + require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) + body, err := testResponse.Body() + require.NoError(t, err) + require.NotContains(t, string(body), "Panic:") + }) } diff --git a/rest/blip_api_delta_sync_test.go b/rest/blip_api_delta_sync_test.go index 74651c909e..93f0a1992f 100644 --- a/rest/blip_api_delta_sync_test.go +++ b/rest/blip_api_delta_sync_test.go @@ -33,57 +33,64 @@ func TestBlipDeltaSyncPushAttachment(t *testing.T) { const docID = "pushAttachmentDoc" - rt := NewRestTester(t, - &RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - DeltaSync: &DeltaSyncConfig{ - Enabled: base.BoolPtr(true), - }, - }}, - GuestEnabled: true, - }) - defer rt.Close() + rtConfig := &RestTesterConfig{ + DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ + DeltaSync: &DeltaSyncConfig{ + Enabled: base.BoolPtr(true), + }, + }}, + GuestEnabled: true, + } + + btcRunner := NewBlipTesterClientRunner(t) - btc, 
err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Push first rev - version, err := btc.PushRev(docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) - require.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + // Push first rev + version, err := btcRunner.PushRev(btc.id, docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) + require.NoError(t, err) - // Push second rev with an attachment (no delta yet) - attData := base64.StdEncoding.EncodeToString([]byte("attach")) + // Push second rev with an attachment (no delta yet) + attData := base64.StdEncoding.EncodeToString([]byte("attach")) - version, err = btc.PushRev(docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) - require.NoError(t, err) + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) + require.NoError(t, err) - syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err := btc.rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found := syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found := syncData.Attachments["myAttachment"] + assert.True(t, found) - // Turn deltas on - btc.ClientDeltas = true + // Turn deltas on + btc.ClientDeltas = true - // Get existing body with the stub attachment, insert a new property and push as delta. 
- body, found := btc.GetVersion(docID, version) - require.True(t, found) + // Get existing body with the stub attachment, insert a new property and push as delta. + body, found := btcRunner.GetVersion(btc.id, docID, version) + require.True(t, found) - newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) - require.NoError(t, err) + newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) + require.NoError(t, err) - _, err = btc.PushRev(docID, version, newBody) - require.NoError(t, err) + _, err = btcRunner.PushRev(btc.id, docID, version, newBody) + require.NoError(t, err) - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err = btc.rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found = syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found = syncData.Attachments["myAttachment"] + assert.True(t, found) + + // set client deltas back to false for next run + btc.ClientDeltas = false + }) } // Test pushing and pulling new attachments through delta sync @@ -106,59 +113,63 @@ func TestBlipDeltaSyncPushPullNewAttachment(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btc.ClientDeltas = true - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + btc.ClientDeltas = true + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client by adding another attachment. - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 2) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - world, ok := attachments["world.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) - 
assert.Equal(t, float64(11), world["length"]) - assert.Equal(t, float64(2), world["revpos"]) - assert.Equal(t, true, world["stub"]) + // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client by adding another attachment. + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 2) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + world, ok := attachments["world.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) 
+ assert.Equal(t, float64(11), world["length"]) + assert.Equal(t, float64(2), world["revpos"]) + assert.Equal(t, true, world["stub"]) + }) } // TestBlipDeltaSyncNewAttachmentPull tests that adding a new attachment in SG and replicated via delta sync adds the attachment @@ -175,84 +186,87 @@ func TestBlipDeltaSyncNewAttachmentPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner := NewBlipTesterClientRunner(t) + const doc1ID = "doc1" - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - const doc1ID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment - version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) - - data, ok = client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - var dataMap map[string]interface{} - assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) - atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, atts, 1) - hello, ok := atts["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // message #3 is the getAttachment 
message that is sent in-between rev processing - msg, ok := client.pullReplication.WaitForMessage(3) - assert.True(t, ok) - assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") - - // Check EE is delta, and CE is full-body replication - // msg, ok = client.pullReplication.WaitForMessage(5) - msg, ok = client.WaitForBlipRevMessage(doc1ID, version) - assert.True(t, ok) - - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) - assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) - } - respBody := rt.GetDocVersion(doc1ID, version) - assert.Equal(t, doc1ID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 2) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, 
map[string]interface{}{"hi": "alice"}, greetings[1]) - atts = respBody[db.BodyAttachments].(map[string]interface{}) - assert.Len(t, atts, 1) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := client.rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment + version = client.rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) + + data, ok = btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + var dataMap map[string]interface{} + assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) + atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, atts, 1) + hello, ok := atts["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // message #3 is the getAttachment message that is sent in-between rev processing + msg, ok := client.pullReplication.WaitForMessage(3) + assert.True(t, ok) + assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected 
non-error blip message type") + + // Check EE is delta, and CE is full-body replication + // msg, ok = client.pullReplication.WaitForMessage(5) + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, doc1ID, version) + assert.True(t, ok) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) + assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) + } + + respBody := client.rt.GetDocVersion(doc1ID, version) + assert.Equal(t, doc1ID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 2) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + atts = respBody[db.BodyAttachments].(map[string]interface{}) + assert.Len(t, atts, 1) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // assert.Equal(t, 
`{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + }) } // TestBlipDeltaSyncPull tests that a simple pull replication uses deltas in EE, @@ -262,7 +276,7 @@ func TestBlipDeltaSyncPull(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -270,66 +284,69 @@ func TestBlipDeltaSyncPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() var deltaSentCount int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version = rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() 
- data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaSentCount = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - // Check EE is delta, and CE is full-body replication - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - var afterDeltaSyncCount int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 
2-959f0e9ad32d84ff652fb91d8d0caa7e + version = client.rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version) + assert.True(t, ok) + + // Check EE is delta, and CE is full-body replication + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + + var afterDeltaSyncCount int64 + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + afterDeltaSyncCount = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + assert.Equal(t, deltaSentCount, afterDeltaSyncCount) } - - assert.Equal(t, deltaSentCount, afterDeltaSyncCount) - } + }) } // TestBlipDeltaSyncPullResend tests that a simple pull replication that uses a delta a client rejects will resend the revision in full. 
@@ -341,7 +358,7 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: base.BoolPtr(true), @@ -349,58 +366,60 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - docID := "doc1" - // create doc1 rev 1 - docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - // reject deltas built ontop of rev 1 - client.rejectDeltasForSrcRev = docVersion1.RevID - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - data, ok := client.WaitForVersion(docID, docVersion1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2 - docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = client.WaitForVersion(docID, docVersion2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - - // Check the request was initially sent with the correct deltaSrc property - assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - - 
msg, ok = client.WaitForBlipRevMessage(docID, docVersion2) - assert.True(t, ok) - - // Check the resent request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err = msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + // create doc1 rev 1 + docVersion1 := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + deltaSentCount := client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + + // reject deltas built ontop of rev 1 + client.rejectDeltasForSrcRev = docVersion1.RevID + + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + data, ok := btcRunner.WaitForVersion(client.id, docID, docVersion1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2 + docVersion2 := client.rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, docVersion2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + + // Check the request was initially sent with the correct deltaSrc property + 
assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, docID, docVersion2) + assert.True(t, ok) + + // Check the resent request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err = msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullRemoved tests a simple pull replication that drops a document out of the user's channel. 
@@ -409,7 +428,7 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -419,43 +438,45 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer client.Close() - - err = client.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 - version = rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"_removed":true}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_removed":true}`, string(msgBody)) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + 
ClientDeltas: true, + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer client.Close() + + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 + version := client.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 + version = client.rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"_removed":true}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_removed":true}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullTombstoned tests a simple pull replication that deletes a document. 
@@ -473,7 +494,7 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -483,78 +504,79 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client.Close() + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - err = client.StartPull() - assert.NoError(t, err) + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = 
client.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{}`, string(msgBody)) - assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) - - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := client.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart, 
deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = client.rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{}`, string(msgBody)) + assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + 
assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullTombstonedStarChan tests two clients can perform a simple pull replication that deletes a document when the user has access to the star channel. @@ -576,129 +598,133 @@ func TestBlipDeltaSyncPullTombstonedStarChan(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyHTTP, base.KeyCache, base.KeySync, base.KeySyncMsg) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + rtConfig := RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - client1, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client1", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client1.Close() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - client2, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client2", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client2.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = client1.StartPull() - require.NoError(t, err) + client1 := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client1", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client1.Close() + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client2", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client2.Close() - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // Have client2 get only rev-1 and then stop replicating - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok := client1.WaitForBlipRevMessage(docID, version) // docid, revid to get the message - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - msgBody, err := msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for 
message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } + if client1.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok = client2.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - msgBody, err = msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } + err := btcRunner.StartPull(client1.id) + require.NoError(t, err) - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := client1.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = 
rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + data, ok := btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) - assert.Equal(t, deltasSentStart+2, deltasSentEnd) - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } + // Have client2 get only rev-1 and then stop replicating + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = client1.rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client1.id, docID, version) // docid, revid to get the message + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + msgBody, err := msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + 
msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + + // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok = btcRunner.WaitForBlipRevMessage(client2.id, docID, version) + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + msgBody, err = msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if client1.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) + 
assert.Equal(t, deltasSentStart+2, deltasSentEnd) + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullRevCache tests that a simple pull replication uses deltas in EE, @@ -720,79 +746,78 @@ func TestBlipDeltaSyncPullRevCache(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client2.Close() + client.ClientDeltas = true - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // Perform a one-shot pull as client 2 to pull down the first revision - - client2, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client2.Close() - - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 
2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check EE is delta - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - - deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - msg2, ok := client2.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody2, err := msg2.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) - - updatedDeltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) - assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + 
version1 := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // Perform a one-shot pull as client 2 to pull down the first revision + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := client.rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) + data, ok = btcRunner.WaitForVersion(client.id, docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version2) + assert.True(t, ok) + + // Check EE is delta + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + deltaCacheHits := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMisses := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + msg2, ok := btcRunner.WaitForBlipRevMessage(client2.id, docID, version2) + assert.True(t, ok) + + // Check the request was sent with the correct deltaSrc 
property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody2, err := msg2.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) + + updatedDeltaCacheHits := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + updatedDeltaCacheMisses := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) + assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + }) } // TestBlipDeltaSyncPush tests that a simple push replication handles deltas in EE, @@ -801,7 +826,7 @@ func TestBlipDeltaSyncPush(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -809,96 +834,99 @@ func TestBlipDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true - - err = client.StartPull() - assert.NoError(t, err) - - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abc on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) + btcRunner.Run(func(t 
*testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client.ClientDeltas = true - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() + collection := client.rt.GetSingleTestDatabaseCollection() + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - // Validate that generation of a delta didn't mutate the revision body in the revision cache - docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) - assert.NoError(t, cacheErr) - assert.NotContains(t, docRev.BodyBytes, "bob") - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abc on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) 
assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - } - respBody := rt.GetDocVersion(docID, newRev) - assert.Equal(t, "doc1", respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 3) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + // Validate that generation of a delta didn't mutate the revision body in the revision cache + docRev, cacheErr := client.rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) + assert.NoError(t, cacheErr) + assert.NotContains(t, docRev.BodyBytes, "bob") + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + } - // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) 
- deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) + respBody := client.rt.GetDocVersion(docID, newRev) + assert.Equal(t, "doc1", respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 3) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) - data, ok = client.WaitForVersion(docID, deletedVersion) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) + // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) + deletedVersion := client.rt.DeleteDocReturnVersion(docID, newRev) - var deltaPushDocCountStart int64 + data, ok = btcRunner.WaitForVersion(client.id, docID, deletedVersion) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } + var deltaPushDocCountStart int64 - _, err = client.PushRev(docID, deletedVersion, []byte(`{"undelete":true}`)) + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } - if base.IsEnterpriseEdition() { - // Now make the client push up a delta that has the parent of the tombstone. - // This is not a valid scenario, and is actively prevented on the CBL side. - assert.Error(t, err) - assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") - } else { - // Pushing a full body revision on top of a tombstone is valid. - // CBL clients should fall back to this. The test client doesn't. - assert.NoError(t, err) - } + _, err = btcRunner.PushRev(client.id, docID, deletedVersion, []byte(`{"undelete":true}`)) + + if base.IsEnterpriseEdition() { + // Now make the client push up a delta that has the parent of the tombstone. 
+ // This is not a valid scenario, and is actively prevented on the CBL side. + assert.Error(t, err) + assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") + } else { + // Pushing a full body revision on top of a tombstone is valid. + // CBL clients should fall back to this. The test client doesn't. + assert.NoError(t, err) + } - var deltaPushDocCountEnd int64 + var deltaPushDocCountEnd int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } - assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + if client.rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } + assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + }) } // TestBlipNonDeltaSyncPush tests that a client that doesn't support deltas can push to a SG that supports deltas (either CE or EE) @@ -906,7 +934,7 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -914,41 +942,44 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - client.ClientDeltas = false - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abcxyz on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) - - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - - body := rt.GetDocVersion("doc1", newRev) - require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + collection := client.rt.GetSingleTestDatabaseCollection() + client.ClientDeltas = false + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abcxyz on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + assert.NoError(t, err) + // Check EE is delta, and CE 
is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + + body := client.rt.GetDocVersion("doc1", newRev) + require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + }) } diff --git a/rest/blip_api_no_race_test.go b/rest/blip_api_no_race_test.go index f6e35f9cf1..286db3905a 100644 --- a/rest/blip_api_no_race_test.go +++ b/rest/blip_api_no_race_test.go @@ -44,65 +44,68 @@ func TestBlipPusherUpdateDatabase(t *testing.T) { GuestEnabled: true, CustomTestBucket: tb.NoCloseClone(), } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - var lastPushRevErr atomic.Value - - // Wait for the background updates to finish at the end of the test - shouldCreateDocs := base.NewAtomicBool(true) - wg := sync.WaitGroup{} - wg.Add(1) - defer func() { - shouldCreateDocs.Set(false) - wg.Wait() - }() - - // Start the test client creating and pushing documents in the background - go func() { - for i := 0; shouldCreateDocs.IsTrue(); i++ { - // this will begin to error when the database is reloaded underneath the replication - _, err := client.PushRev(fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) - if err != nil { - lastPushRevErr.Store(err) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := 
&BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + + var lastPushRevErr atomic.Value + + // Wait for the background updates to finish at the end of the test + shouldCreateDocs := base.NewAtomicBool(true) + wg := sync.WaitGroup{} + wg.Add(1) + defer func() { + shouldCreateDocs.Set(false) + wg.Wait() + }() + + // Start the test client creating and pushing documents in the background + go func() { + for i := 0; shouldCreateDocs.IsTrue(); i++ { + // this will begin to error when the database is reloaded underneath the replication + _, err := btcRunner.PushRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) + if err != nil { + lastPushRevErr.Store(err) + } } - } - _ = rt.WaitForPendingChanges() - wg.Done() - }() - - // and wait for a few to be done before we proceed with updating database config underneath replication - _, err = rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) - require.NoError(t, err) - - // just change the sync function to cause the database to reload - dbConfig := *rt.ServerContext().GetDbConfig("db") - dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) - resp := rt.ReplaceDbConfig("db", dbConfig) - RequireStatus(t, resp, http.StatusCreated) - - // Did we tell the client to close the connection (via HTTP/503)? - // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
- WaitAndAssertCondition(t, func() bool { - lastErr, ok := lastPushRevErr.Load().(error) - if !ok { - return false - } - if lastErr == nil { - return false - } - lastErrMsg := lastErr.Error() - if !strings.Contains(lastErrMsg, "HTTP 503") { - return false - } - if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { - return false - } - return true - }, "expected HTTP 503 error") - + _ = rt.WaitForPendingChanges() + wg.Done() + }() + + // and wait for a few to be done before we proceed with updating database config underneath replication + _, err := rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) + require.NoError(t, err) + + // just change the sync function to cause the database to reload + dbConfig := *rt.ServerContext().GetDbConfig("db") + dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) + resp := rt.ReplaceDbConfig("db", dbConfig) + RequireStatus(t, resp, http.StatusCreated) + + // Did we tell the client to close the connection (via HTTP/503)? + // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
+ WaitAndAssertCondition(t, func() bool { + lastErr, ok := lastPushRevErr.Load().(error) + if !ok { + return false + } + if lastErr == nil { + return false + } + lastErrMsg := lastErr.Error() + if !strings.Contains(lastErrMsg, "HTTP 503") { + return false + } + if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { + return false + } + return true + }, "expected HTTP 503 error") + }) } diff --git a/rest/blip_client_test.go b/rest/blip_client_test.go index f3e808aae0..2d0c69d97e 100644 --- a/rest/blip_client_test.go +++ b/rest/blip_client_test.go @@ -46,6 +46,7 @@ type BlipTesterClientOpts struct { type BlipTesterClient struct { BlipTesterClientOpts + id uint32 // unique ID for the client rt *RestTester pullReplication *BlipTesterReplicator // SG -> CBL replications pushReplication *BlipTesterReplicator // CBL -> SG replications @@ -69,6 +70,14 @@ type BlipTesterCollectionClient struct { lastReplicatedRevLock sync.RWMutex // lock for lastReplicatedRev map } +// BlipTestClientRunner is for running the blip tester client and its associated methods in test framework +type BlipTestClientRunner struct { + clients map[uint32]*BlipTesterClient // map of created BlipTesterClient's + t *testing.T + initialisedInsideRunnerCode bool // flag to check that the BlipTesterClient is being initialised in the correct area (inside the Run() method) + SkipVersionVectorInitialization bool // used to skip the version vector subtest +} + type BodyMessagePair struct { body []byte message *blip.Message @@ -85,6 +94,14 @@ type BlipTesterReplicator struct { replicationStats *db.BlipSyncStats // Stats of replications } +// NewBlipTesterClientRunner creates a BlipTestClientRunner type +func NewBlipTesterClientRunner(t *testing.T) *BlipTestClientRunner { + return &BlipTestClientRunner{ + t: t, + clients: make(map[uint32]*BlipTesterClient), + } +} + func (btr *BlipTesterReplicator) Close() { btr.bt.Close() btr.messagesLock.Lock() @@ -571,33 +588,83 @@ 
func getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { return collections } -func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { +// NewBlipTesterClientOptsWithRT creates a BlipTesterClient and adds it to the map of clients on the BlipTestClientRunner. Then creates replications on the client +func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { + if !btcRunner.initialisedInsideRunnerCode { + btcRunner.t.Fatalf("must initialise BlipTesterClient inside Run() method") + } if opts == nil { opts = &BlipTesterClientOpts{} } - btc := BlipTesterClient{ + id, err := uuid.NewRandom() + require.NoError(btcRunner.t, err) + + client = &BlipTesterClient{ BlipTesterClientOpts: *opts, rt: rt, + id: id.ID(), + } + btcRunner.clients[client.id] = client + err = client.createBlipTesterReplications() + require.NoError(btcRunner.t, err) + + return client +} + +func (btc *BlipTesterClient) Close() { + btc.tearDownBlipClientReplications() + for _, collectionClient := range btc.collectionClients { + collectionClient.Close() } + if btc.nonCollectionAwareClient != nil { + btc.nonCollectionAwareClient.Close() + } +} + +// Run runs two subtests of the input test code, one for rev tree replications and another for version vector enabled replication +func (btcRunner *BlipTestClientRunner) Run(test func(t *testing.T, SupportedBLIPProtocols []string)) { + btcRunner.initialisedInsideRunnerCode = true + // reset to protect against someone creating a new client after Run() is run + defer func() { btcRunner.initialisedInsideRunnerCode = false }() + btcRunner.t.Run("revTree", func(t *testing.T) { + test(t, []string{db.BlipCBMobileReplicationV3}) + }) + // if test is not wanting version vector subprotocol to be run, return before we start this subtest + if btcRunner.SkipVersionVectorInitialization { + return + } + 
btcRunner.t.Run("versionVector", func(t *testing.T) { + // bump sub protocol version here and pass into test function pending CBG-3253 + test(t, nil) + }) +} +// tearDownBlipClientReplications closes any underlying BlipTesterReplications running on the BlipTesterClient +func (btc *BlipTesterClient) tearDownBlipClientReplications() { + btc.pullReplication.Close() + btc.pushReplication.Close() +} + +// createBlipTesterReplications initiates new BlipTesterReplications on the BlipTesterClient +func (btc *BlipTesterClient) createBlipTesterReplications() error { id, err := uuid.NewRandom() if err != nil { - return nil, err + return err } - if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - collections := getCollectionsForBLIP(tb, rt) - if !opts.SkipCollectionsInitialization && len(collections) > 0 { + collections := getCollectionsForBLIP(btc.rt.TB, btc.rt) + if !btc.BlipTesterClientOpts.SkipCollectionsInitialization && len(collections) > 0 { btc.collectionClients = make([]*BlipTesterCollectionClient, len(collections)) for i, collection := range collections { if err := btc.initCollectionReplication(collection, i); err != nil { - return nil, err + return err } } } else { @@ -605,40 +672,14 @@ func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterC docs: make(map[string]map[string]*BodyMessagePair), attachments: 
make(map[string][]byte), lastReplicatedRev: make(map[string]string), - parent: &btc, + parent: btc, } - } - return &btc, nil -} - -// NewBlipTesterClient returns a client which emulates the behaviour of a CBL client over BLIP. -func NewBlipTesterClient(tb testing.TB, rt *RestTester) (client *BlipTesterClient, err error) { - return createBlipTesterClientOpts(tb, rt, nil) -} - -func NewBlipTesterClientOptsWithRT(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { - client, err = createBlipTesterClientOpts(tb, rt, opts) - if err != nil { - return nil, err - } - - client.pullReplication.bt.avoidRestTesterClose = true - client.pushReplication.bt.avoidRestTesterClose = true - - return client, nil -} + btc.pullReplication.bt.avoidRestTesterClose = true + btc.pushReplication.bt.avoidRestTesterClose = true -func (btc *BlipTesterClient) Close() { - btc.pullReplication.Close() - btc.pushReplication.Close() - for _, collectionClient := range btc.collectionClients { - collectionClient.Close() - } - if btc.nonCollectionAwareClient != nil { - btc.nonCollectionAwareClient.Close() - } + return nil } func (btc *BlipTesterClient) initCollectionReplication(collection string, collectionIdx int) error { @@ -668,25 +709,25 @@ func (btc *BlipTesterClient) waitForReplicationMessage(collection *db.DatabaseCo } // SingleCollection returns a single collection blip tester if the RestTester database is configured with only one collection. Otherwise, throw a fatal test error. 
-func (btc *BlipTesterClient) SingleCollection() *BlipTesterCollectionClient { - if btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) SingleCollection(clientID uint32) *BlipTesterCollectionClient { + if btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - require.Equal(btc.rt.TB, 1, len(btc.collectionClients)) - return btc.collectionClients[0] + require.Equal(btcRunner.clients[clientID].rt.TB, 1, len(btcRunner.clients[clientID].collectionClients)) + return btcRunner.clients[clientID].collectionClients[0] } // Collection return a collection blip tester by name, if configured in the RestTester database. Otherwise, throw a fatal test error. -func (btc *BlipTesterClient) Collection(collectionName string) *BlipTesterCollectionClient { - if collectionName == "_default._default" && btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) Collection(clientID uint32, collectionName string) *BlipTesterCollectionClient { + if collectionName == "_default._default" && btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - for _, collectionClient := range btc.collectionClients { + for _, collectionClient := range btcRunner.clients[clientID].collectionClients { if collectionClient.collection == collectionName { return collectionClient } } - btc.rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) + btcRunner.clients[clientID].rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) return nil } @@ -1126,81 +1167,81 @@ func (btc *BlipTesterCollectionClient) GetBlipRevMessage(docID, revID string) (m return nil, false } -func (btc *BlipTesterClient) StartPull() error { - return btc.SingleCollection().StartPull() +func (btcRunner *BlipTestClientRunner) 
StartPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartPull() } // WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. -func (btc *BlipTesterClient) WaitForVersion(docID string, docVersion DocVersion) (data []byte, found bool) { - return btc.SingleCollection().WaitForVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForVersion(clientID uint32, docID string, docVersion DocVersion) (data []byte, found bool) { + return btcRunner.SingleCollection(clientID).WaitForVersion(docID, docVersion) } -func (btc *BlipTesterClient) WaitForDoc(docID string) ([]byte, bool) { - return btc.SingleCollection().WaitForDoc(docID) +func (btcRunner *BlipTestClientRunner) WaitForDoc(clientID uint32, docID string) ([]byte, bool) { + return btcRunner.SingleCollection(clientID).WaitForDoc(docID) } -func (btc *BlipTesterClient) WaitForBlipRevMessage(docID string, docVersion DocVersion) (*blip.Message, bool) { - return btc.SingleCollection().WaitForBlipRevMessage(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForBlipRevMessage(clientID uint32, docID string, docVersion DocVersion) (*blip.Message, bool) { + return btcRunner.SingleCollection(clientID).WaitForBlipRevMessage(docID, docVersion) } -func (btc *BlipTesterClient) StartOneshotPull() error { - return btc.SingleCollection().StartOneshotPull() +func (btcRunner *BlipTestClientRunner) StartOneshotPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPull() } -func (btc *BlipTesterClient) StartOneshotPullFiltered(channels string) error { - return btc.SingleCollection().StartOneshotPullFiltered(channels) +func (btcRunner *BlipTestClientRunner) StartOneshotPullFiltered(clientID uint32, channels string) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullFiltered(channels) } -func (btc *BlipTesterClient) StartOneshotPullRequestPlus() error { - return 
btc.SingleCollection().StartOneshotPullRequestPlus() +func (btcRunner *BlipTestClientRunner) StartOneshotPullRequestPlus(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullRequestPlus() } -func (btc *BlipTesterClient) PushRev(docID string, version DocVersion, body []byte) (DocVersion, error) { - return btc.SingleCollection().PushRev(docID, version, body) +func (btcRunner *BlipTestClientRunner) PushRev(clientID uint32, docID string, version DocVersion, body []byte) (DocVersion, error) { + return btcRunner.SingleCollection(clientID).PushRev(docID, version, body) } -func (btc *BlipTesterClient) StartPullSince(continuous, since, activeOnly string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, "", "") +func (btcRunner *BlipTestClientRunner) StartPullSince(clientID uint32, continuous, since, activeOnly string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, "", "") } -func (btc *BlipTesterClient) StartFilteredPullSince(continuous, since, activeOnly string, channels string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, channels, "") +func (btcRunner *BlipTestClientRunner) StartFilteredPullSince(clientID uint32, continuous, since, activeOnly, channels string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, channels, "") } -func (btc *BlipTesterClient) GetVersion(docID string, docVersion DocVersion) ([]byte, bool) { - return btc.SingleCollection().GetVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) GetVersion(clientID uint32, docID string, docVersion DocVersion) ([]byte, bool) { + return btcRunner.SingleCollection(clientID).GetVersion(docID, docVersion) } -func (btc *BlipTesterClient) saveAttachment(contentType string, attachmentData string) (int, string, error) { - return btc.SingleCollection().saveAttachment(contentType, attachmentData) 
+func (btcRunner *BlipTestClientRunner) saveAttachment(clientID uint32, contentType string, attachmentData string) (int, string, error) { + return btcRunner.SingleCollection(clientID).saveAttachment(contentType, attachmentData) } -func (btc *BlipTesterClient) StoreRevOnClient(docID, revID string, body []byte) error { - return btc.SingleCollection().StoreRevOnClient(docID, revID, body) +func (btcRunner *BlipTestClientRunner) StoreRevOnClient(clientID uint32, docID, revID string, body []byte) error { + return btcRunner.SingleCollection(clientID).StoreRevOnClient(docID, revID, body) } -func (btc *BlipTesterClient) PushRevWithHistory(docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { - return btc.SingleCollection().PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) +func (btcRunner *BlipTestClientRunner) PushRevWithHistory(clientID uint32, docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { + return btcRunner.SingleCollection(clientID).PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) } -func (btc *BlipTesterClient) AttachmentsLock() *sync.RWMutex { - return &btc.SingleCollection().attachmentsLock +func (btcRunner *BlipTestClientRunner) AttachmentsLock(clientID uint32) *sync.RWMutex { + return &btcRunner.SingleCollection(clientID).attachmentsLock } func (btc *BlipTesterCollectionClient) AttachmentsLock() *sync.RWMutex { return &btc.attachmentsLock } -func (btc *BlipTesterClient) Attachments() map[string][]byte { - return btc.SingleCollection().attachments +func (btcRunner *BlipTestClientRunner) Attachments(clientID uint32) map[string][]byte { + return btcRunner.SingleCollection(clientID).attachments } func (btc *BlipTesterCollectionClient) Attachments() map[string][]byte { return btc.attachments } -func (btc *BlipTesterClient) UnsubPullChanges() ([]byte, error) { - return btc.SingleCollection().UnsubPullChanges() +func (btcRunner *BlipTestClientRunner) UnsubPullChanges(clientID 
uint32) ([]byte, error) { + return btcRunner.SingleCollection(clientID).UnsubPullChanges() } func (btc *BlipTesterCollectionClient) addCollectionProperty(msg *blip.Message) { diff --git a/rest/bulk_api.go b/rest/bulk_api.go index 83358e7c49..15f6b731a6 100644 --- a/rest/bulk_api.go +++ b/rest/bulk_api.go @@ -511,7 +511,7 @@ func (h *handler) handleBulkDocs() error { err = base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions") } else { revid = revisions[0] - _, _, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, doc, revisions, false) + _, _, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, doc, revisions, false, db.ExistingVersionWithUpdateToHLV) } } diff --git a/rest/doc_api.go b/rest/doc_api.go index 4c278e8f0c..d7ca12924e 100644 --- a/rest/doc_api.go +++ b/rest/doc_api.go @@ -471,7 +471,7 @@ func (h *handler) handlePutDoc() error { if revisions == nil { return base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions") } - doc, newRev, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, body, revisions, false) + doc, newRev, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, body, revisions, false, db.ExistingVersionWithUpdateToHLV) if err != nil { return err } @@ -548,7 +548,7 @@ func (h *handler) handlePutDocReplicator2(docid string, roundTrip bool) (err err newDoc.UpdateBody(body) } - doc, rev, err := h.collection.PutExistingRev(h.ctx(), newDoc, history, true, false, nil) + doc, rev, err := h.collection.PutExistingRev(h.ctx(), newDoc, history, true, false, nil, db.ExistingVersionWithUpdateToHLV) if err != nil { return err diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go index 0fa9f61b82..6426dfe99d 100644 --- a/rest/importtest/import_test.go +++ b/rest/importtest/import_test.go @@ -424,6 +424,9 @@ func TestXattrDoubleDelete(t *testing.T) { } func TestViewQueryTombstoneRetrieval(t *testing.T) { + t.Skip("Disabled pending CBG-3503") + base.SkipImportTestsIfNotEnabled(t) + if !base.TestsDisableGSI() { 
t.Skip("views tests are not applicable under GSI") } diff --git a/rest/replicatortest/replicator_test.go b/rest/replicatortest/replicator_test.go index 8c67c764c3..7dc1fa4118 100644 --- a/rest/replicatortest/replicator_test.go +++ b/rest/replicatortest/replicator_test.go @@ -8320,3 +8320,46 @@ func requireBodyEqual(t *testing.T, expected string, doc *db.Document) { require.NoError(t, base.JSONUnmarshal([]byte(expected), &expectedBody)) require.Equal(t, expectedBody, doc.Body(base.TestCtx(t))) } + +// TestReplicatorUpdateHLVOnPut: +// - For purpose of testing the PutExistingRev code path +// - Put a doc on a active rest tester +// - Create replication and wait for the doc to be replicated to passive node +// - Assert on the HLV in the metadata of the replicated document +func TestReplicatorUpdateHLVOnPut(t *testing.T) { + + activeRT, passiveRT, remoteURL, teardown := rest.SetupSGRPeers(t) + defer teardown() + + // Grab the bucket UUIDs for both rest testers + activeBucketUUID, err := activeRT.GetDatabase().Bucket.UUID() + require.NoError(t, err) + + const rep = "replication" + + // Put a doc and assert on the HLV update in the sync data + resp := activeRT.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"source": "activeRT"}`) + rest.RequireStatus(t, resp, http.StatusCreated) + + syncData, err := activeRT.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") + assert.NoError(t, err) + uintCAS := base.HexCasToUint64(syncData.Cas) + + assert.Equal(t, activeBucketUUID, syncData.HLV.SourceID) + assert.Equal(t, uintCAS, syncData.HLV.Version) + assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) + + // create the replication to push the doc to the passive node and wait for the doc to be replicated + activeRT.CreateReplication(rep, remoteURL, db.ActiveReplicatorTypePush, nil, false, db.ConflictResolverDefault) + + _, err = passiveRT.WaitForChanges(1, "/{{.keyspace}}/_changes", "", true) + require.NoError(t, err) + + // assert on the HLV 
update on the passive node + syncData, err = passiveRT.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") + assert.NoError(t, err) + uintCAS = base.HexCasToUint64(syncData.Cas) + + // TODO: assert that the SourceID and Verison pair are preserved correctly pending CBG-3211 + assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) +} diff --git a/rest/revocation_test.go b/rest/revocation_test.go index 35359e5f1d..cd58cea1d5 100644 --- a/rest/revocation_test.go +++ b/rest/revocation_test.go @@ -2223,190 +2223,195 @@ func TestReplicatorRevocationsFromZero(t *testing.T) { func TestRevocationMessage(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() - - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btcRunner := NewBlipTesterClientRunner(t) + const doc1ID = "doc1" - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := rt.PutDoc("doc", `{"channels": "A"}`) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - require.NoError(t, rt.WaitForPendingChanges()) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Start pull - err = btc.StartOneshotPull() - assert.NoError(t, err) + // Skip to seq 4 and then create doc in 
channel A + revocationTester.fillToSeq(4) + version := btc.rt.PutDoc("doc", `{"channels": "A"}`) - // Wait for doc revision to come over - _, ok := btc.WaitForBlipRevMessage("doc", version) - require.True(t, ok) + require.NoError(t, btc.rt.WaitForPendingChanges()) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Start pull + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - const doc1ID = "doc1" - version = rt.PutDoc(doc1ID, `{"channels": "!"}`) + // Wait for doc revision to come over + _, ok := btcRunner.WaitForBlipRevMessage(btc.id, "doc", version) + require.True(t, ok) - revocationTester.fillToSeq(10) - version = rt.UpdateDoc(doc1ID, version, "{}") + // Remove role from user + revocationTester.removeRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) + version = btc.rt.PutDoc(doc1ID, `{"channels": "!"}`) - // Start a pull since 5 to receive revocation and removal - err = btc.StartPullSince("false", "5", "false") - assert.NoError(t, err) + revocationTester.fillToSeq(10) + version = btc.rt.UpdateDoc(doc1ID, version, "{}") - // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here - _, found := btc.WaitForVersion(doc1ID, version) - require.True(t, found) - - messages := btc.pullReplication.GetMessages() - - testCases := []struct { - Name string - DocID string - ExpectedDeleted int64 - }{ - { - Name: "Revocation", - DocID: "doc", - ExpectedDeleted: int64(2), - }, - { - Name: "Removed", - DocID: "doc1", - ExpectedDeleted: int64(4), - }, - } + require.NoError(t, btc.rt.WaitForPendingChanges()) - for _, testCase := range testCases { - t.Run(testCase.Name, func(t *testing.T) { - // Verify the deleted property in the changes message is "2" this indicated a revocation - for _, msg := range messages { - if msg.Properties[db.BlipProfile] == db.MessageChanges { - var changesMessages [][]interface{} - err = msg.ReadJSONBody(&changesMessages) - if err != nil { - 
continue - } + // Start a pull since 5 to receive revocation and removal + err = btcRunner.StartPullSince(btc.id, "false", "5", "false") + assert.NoError(t, err) - if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { - continue - } + // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, version) + require.True(t, found) + + messages := btc.pullReplication.GetMessages() + + testCases := []struct { + Name string + DocID string + ExpectedDeleted int64 + }{ + { + Name: "Revocation", + DocID: "doc", + ExpectedDeleted: int64(2), + }, + { + Name: "Removed", + DocID: "doc1", + ExpectedDeleted: int64(4), + }, + } - criteriaMet := false - for _, changesMessage := range changesMessages { - castedNum, ok := changesMessage[3].(json.Number) - if !ok { + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + // Verify the deleted property in the changes message is "2" this indicated a revocation + for _, msg := range messages { + if msg.Properties[db.BlipProfile] == db.MessageChanges { + var changesMessages [][]interface{} + err = msg.ReadJSONBody(&changesMessages) + if err != nil { continue } - intDeleted, err := castedNum.Int64() - if err != nil { + + if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { continue } - if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { - criteriaMet = true - break + + criteriaMet := false + for _, changesMessage := range changesMessages { + castedNum, ok := changesMessage[3].(json.Number) + if !ok { + continue + } + intDeleted, err := castedNum.Int64() + if err != nil { + continue + } + if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { + criteriaMet = true + break + } } - } - assert.True(t, criteriaMet) + assert.True(t, criteriaMet) + } } - } - }) - } - - 
assert.NoError(t, err) + }) + } + assert.NoError(t, err) + }) } func TestRevocationNoRev(t *testing.T) { defer db.SuspendSequenceBatching()() - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + const docID = "doc" + const waitMarkerID = "docmarker" + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}`) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := btc.rt.PutDoc(docID, `{"channels": "A"}`) - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + require.NoError(t, btc.rt.WaitForPendingChanges()) + firstOneShotSinceSeq := btc.rt.GetDocumentSequence("doc") - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + _, ok := btcRunner.WaitForVersion(btc.id, docID, 
version) + require.True(t, ok) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Remove role from user + revocationTester.removeRole("user", "foo") - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + _ = btc.rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - const waitMarkerID = "docmarker" - waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + waitMarkerVersion := btc.rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, btc.rt.WaitForPendingChanges()) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) - messages := btc.pullReplication.GetMessages() + messages := btc.pullReplication.GetMessages() - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + 
highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - require.NoError(t, err) - require.Len(t, messageBody, 2) - require.Len(t, messageBody[0], 4) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + require.NoError(t, err) + require.Len(t, messageBody, 2) + require.Len(t, messageBody[0], 4) - deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - require.NoError(t, err) + deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + require.NoError(t, err) - assert.Equal(t, deletedFlag, int64(2)) + assert.Equal(t, deletedFlag, int64(2)) + }) } func TestRevocationGetSyncDataError(t *testing.T) { @@ -2414,106 +2419,111 @@ func TestRevocationGetSyncDataError(t *testing.T) { var throw bool base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) // Two callbacks to cover usage with CBS/Xattrs and without - revocationTester, rt := InitScenario( - t, &RestTesterConfig{ - leakyBucketConfig: &base.LeakyBucketConfig{ - GetWithXattrCallback: func(key string) error { - return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") - }, GetRawCallback: func(key string) error { - if throw { - return fmt.Errorf("Leaky Bucket GetRawCallback Error") - } - return nil - }, + rtConfig := &RestTesterConfig{ + leakyBucketConfig: &base.LeakyBucketConfig{ + GetWithXattrCallback: func(key string) error { + return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") + }, GetRawCallback: func(key string) error { + if throw { + return fmt.Errorf("Leaky Bucket GetRawCallback Error") + } + return nil }, }, - ) - - defer rt.Close() + } - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + const docID = "doc" + const waitMarkerID = "docmarker" + btcRunner := NewBlipTesterClientRunner(t) + + 
btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, rtConfig) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}}`) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := btc.rt.PutDoc(docID, `{"channels": "A"}}`) - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + require.NoError(t, btc.rt.WaitForPendingChanges()) + firstOneShotSinceSeq := btc.rt.GetDocumentSequence("doc") - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) - throw = true - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + throw = true + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + require.True(t, ok) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Remove role from user + revocationTester.removeRole("user", "foo") - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + _ = btc.rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - const waitMarkerID = "docmarker" - waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + waitMarkerVersion := 
btc.rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, btc.rt.WaitForPendingChanges()) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) + }) } // Regression test for CBG-2183. func TestBlipRevokeNonExistentRole(t *testing.T) { - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: false, - }) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) + rtConfig := &RestTesterConfig{ + GuestEnabled: false, + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + bt := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bilbo", + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer bt.Close() - // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) - res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) - RequireStatus(t, res, http.StatusCreated) + collection := bt.rt.GetSingleTestDatabaseCollection() - // Create a doc so we have something to replicate - res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) - RequireStatus(t, res, http.StatusCreated) + // 1. 
Create user with admin_roles including two roles not previously defined (a1 and a2, for example) + res := bt.rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", bt.rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) + RequireStatus(t, res, http.StatusOK) - // 3. Update the user to not reference one of the roles (update to ['a1'], for example) - // [also revoke channel c1 so the doc shows up in the revocation queries] - res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) - RequireStatus(t, res, http.StatusOK) + // Create a doc so we have something to replicate + res = bt.rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) + RequireStatus(t, res, http.StatusCreated) - // 4. Try to sync - bt, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bilbo", - SendRevocations: true, - }) - require.NoError(t, err) - defer bt.Close() + // 3. Update the user to not reference one of the roles (update to ['a1'], for example) + // [also revoke channel c1 so the doc shows up in the revocation queries] + res = bt.rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", bt.rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) + RequireStatus(t, res, http.StatusOK) - require.NoError(t, bt.StartPull()) + // 4. 
Try to sync + require.NoError(t, btcRunner.StartPull(bt.id)) - // in the failing case we'll panic before hitting this - base.RequireWaitForStat(t, func() int64 { - return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() - }, 1) + // in the failing case we'll panic before hitting this + base.RequireWaitForStat(t, func() int64 { + return bt.rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() + }, 1) + }) } func TestReplicatorSwitchPurgeNoReset(t *testing.T) { diff --git a/rest/user_api_test.go b/rest/user_api_test.go index fc97dbbcdf..fd5d979c21 100644 --- a/rest/user_api_test.go +++ b/rest/user_api_test.go @@ -1479,7 +1479,7 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) { _, err := dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData) assert.NoError(t, err) - docRev, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) + docRev, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) assert.NoError(t, err) assert.Equal(t, 0, len(docRev.Channels.ToArray())) assert.Equal(t, syncData.CurrentRev, docRev.RevID) @@ -1499,7 +1499,7 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) { _, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData2) assert.NoError(t, err) - docRev2, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) + docRev2, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) assert.NoError(t, err) assert.Equal(t, syncData2.CurrentRev, docRev2.RevID) From 8fd554e177fcc8e24ee6f8d88f3c67c18a113e0c Mon Sep 17 00:00:00 2001 From: Gregory Newman-Smith <109068393+gregns1@users.noreply.github.com> Date: Mon, 13 Nov 2023 14:28:48 
+0000 Subject: [PATCH 08/14] Revert "CBG-3576: BlipTestClient support for HLV and rev tree modes (#6567)" (#6573) This reverts commit a398881cc0e8a3d1ac652b0d03f67897a52772b6. --- db/access_test.go | 8 +- db/attachment_test.go | 28 +- db/blip_handler.go | 4 +- db/change_cache.go | 2 +- db/changes_test.go | 4 +- db/crud.go | 90 +- db/crud_test.go | 124 +-- db/database.go | 16 - db/database_test.go | 216 ++-- db/document.go | 82 +- db/document_test.go | 101 -- db/hybrid_logical_vector.go | 49 +- db/import.go | 3 +- db/query_test.go | 30 +- db/revision_cache_bypass.go | 40 +- db/revision_cache_interface.go | 100 +- db/revision_cache_lru.go | 286 +---- db/revision_cache_test.go | 381 +------ db/revision_test.go | 2 +- rest/api_test.go | 91 -- rest/attachment_test.go | 367 +++---- rest/blip_api_attachment_test.go | 891 ++++++++-------- rest/blip_api_collections_test.go | 552 +++++----- rest/blip_api_crud_test.go | 962 ++++++++++++----- rest/blip_api_delta_sync_test.go | 1343 ++++++++++++------------ rest/blip_api_no_race_test.go | 123 ++- rest/blip_client_test.go | 209 ++-- rest/bulk_api.go | 2 +- rest/doc_api.go | 4 +- rest/importtest/import_test.go | 3 - rest/replicatortest/replicator_test.go | 43 - rest/revocation_test.go | 448 ++++---- rest/user_api_test.go | 4 +- 33 files changed, 2904 insertions(+), 3704 deletions(-) diff --git a/db/access_test.go b/db/access_test.go index 48ee595fc7..9b23710cb5 100644 --- a/db/access_test.go +++ b/db/access_test.go @@ -44,7 +44,7 @@ func TestDynamicChannelGrant(t *testing.T) { // Create a document in channel chan1 doc1Body := Body{"channel": "chan1", "greeting": "hello"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc1", doc1Body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc1", doc1Body, []string{"1-a"}, false) require.NoError(t, err) // Verify user cannot access document @@ -54,7 +54,7 @@ func TestDynamicChannelGrant(t *testing.T) { // Write 
access granting document grantingBody := Body{"type": "setaccess", "owner": "user1", "channel": "chan1"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant1", grantingBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant1", grantingBody, []string{"1-a"}, false) require.NoError(t, err) // Verify reloaded user can access document @@ -66,12 +66,12 @@ func TestDynamicChannelGrant(t *testing.T) { // Create a document in channel chan2 doc2Body := Body{"channel": "chan2", "greeting": "hello"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc2", doc2Body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "doc2", doc2Body, []string{"1-a"}, false) require.NoError(t, err) // Write access granting document for chan2 (tests invalidation when channels/inval_seq exists) grantingBody = Body{"type": "setaccess", "owner": "user1", "channel": "chan2"} - _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant2", grantingBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = dbCollection.PutExistingRevWithBody(ctx, "grant2", grantingBody, []string{"1-a"}, false) require.NoError(t, err) // Verify user can now access both documents diff --git a/db/attachment_test.go b/db/attachment_test.go index 4574603f20..a29ccfa7f4 100644 --- a/db/attachment_test.go +++ b/db/attachment_test.go @@ -72,7 +72,7 @@ func TestBackupOldRevisionWithAttachments(t *testing.T) { var rev2Body Body rev2Data := `{"test": true, "updated": true, "_attachments": {"hello.txt": {"stub": true, "revpos": 1}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, docID, rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, docID, rev2Body, []string{"2-abc", rev1ID}, true) require.NoError(t, err) rev2ID := 
"2-abc" @@ -200,7 +200,7 @@ func TestAttachments(t *testing.T) { rev2Bstr := `{"_attachments": {"bye.txt": {"stub":true,"revpos":1,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}, "_rev": "2-f000"}` var body2B Body assert.NoError(t, base.JSONUnmarshal([]byte(rev2Bstr), &body2B)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body2B, []string{"2-f000", rev1id}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body2B, []string{"2-f000", rev1id}, false) assert.NoError(t, err, "Couldn't update document") } @@ -284,7 +284,7 @@ func TestAttachmentCASRetryAfterNewAttachment(t *testing.T) { rev2Data := `{"prop1":"value2", "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true) require.NoError(t, err) log.Printf("Done creating rev 2 for key %s", key) @@ -315,7 +315,7 @@ func TestAttachmentCASRetryAfterNewAttachment(t *testing.T) { var rev3Body Body rev3Data := `{"prop1":"value3", "_attachments": {"hello.txt": {"revpos":2,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev3Data), &rev3Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true) require.NoError(t, err) log.Printf("rev 3 done") @@ -347,7 +347,7 @@ func TestAttachmentCASRetryDuringNewAttachment(t *testing.T) { rev2Data := `{"prop1":"value2"}` require.NoError(t, base.JSONUnmarshal([]byte(rev2Data), &rev2Body)) collection := 
GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2Body, []string{"2-abc", rev1ID}, true) require.NoError(t, err) log.Printf("Done creating rev 2 for key %s", key) @@ -378,7 +378,7 @@ func TestAttachmentCASRetryDuringNewAttachment(t *testing.T) { var rev3Body Body rev3Data := `{"prop1":"value3", "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}` require.NoError(t, base.JSONUnmarshal([]byte(rev3Data), &rev3Body)) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3Body, []string{"3-abc", "2-abc", rev1ID}, true) require.NoError(t, err) log.Printf("rev 3 done") @@ -567,7 +567,7 @@ func TestRetrieveAncestorAttachments(t *testing.T) { // Create document (rev 1) text := `{"key": "value", "version": "1a"}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) - doc, revID, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, revID, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) @@ -575,49 +575,49 @@ func TestRetrieveAncestorAttachments(t *testing.T) { text = `{"key": "value", "version": "2a", "_attachments": {"att1.txt": {"data": "YXR0MS50eHQ="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) 
text = `{"key": "value", "version": "3a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-a", "2-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "4a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "5a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-a", "4-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-a", "4-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "6a", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"6-a", "5-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = 
collection.PutExistingRevWithBody(ctx, "doc", body, []string{"6-a", "5-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "3b", "type": "pruned"}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) text = `{"key": "value", "version": "3b", "_attachments": {"att1.txt": {"stub":true,"revpos":2,"digest":"sha1-gwwPApfQR9bzBKpqoEYwFmKp98A="}}}` assert.NoError(t, base.JSONUnmarshal([]byte(text), &body)) body[BodyRev] = revID - doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"3-b", "2-a"}, false) assert.NoError(t, err, "Couldn't create document") log.Printf("doc: %v", doc) } diff --git a/db/blip_handler.go b/db/blip_handler.go index 407a41b9bf..79a21c1c8b 100644 --- a/db/blip_handler.go +++ b/db/blip_handler.go @@ -1183,9 +1183,9 @@ func (bh *blipHandler) processRev(rq *blip.Message, stats *processRevStats) (err // bh.conflictResolver != nil represents an active SGR2 and BLIPClientTypeSGR2 represents a passive SGR2 forceAllowConflictingTombstone := newDoc.Deleted && (bh.conflictResolver != nil || bh.clientType == BLIPClientTypeSGR2) if bh.conflictResolver != nil { - _, _, err = bh.collection.PutExistingRevWithConflictResolution(bh.loggingCtx, newDoc, history, true, bh.conflictResolver, forceAllowConflictingTombstone, rawBucketDoc, ExistingVersionWithUpdateToHLV) + _, _, err = bh.collection.PutExistingRevWithConflictResolution(bh.loggingCtx, newDoc, history, true, bh.conflictResolver, 
forceAllowConflictingTombstone, rawBucketDoc) } else { - _, _, err = bh.collection.PutExistingRev(bh.loggingCtx, newDoc, history, revNoConflicts, forceAllowConflictingTombstone, rawBucketDoc, ExistingVersionWithUpdateToHLV) + _, _, err = bh.collection.PutExistingRev(bh.loggingCtx, newDoc, history, revNoConflicts, forceAllowConflictingTombstone, rawBucketDoc) } if err != nil { return err diff --git a/db/change_cache.go b/db/change_cache.go index 8f42deeb1e..82f779f152 100644 --- a/db/change_cache.go +++ b/db/change_cache.go @@ -497,7 +497,7 @@ func (c *changeCache) DocChanged(event sgbucket.FeedEvent) { // Now add the entry for the new doc revision: if len(rawUserXattr) > 0 { - collection.revisionCache.RemoveWithRev(docID, syncData.CurrentRev) + collection.revisionCache.Remove(docID, syncData.CurrentRev) } change := &LogEntry{ Sequence: syncData.Sequence, diff --git a/db/changes_test.go b/db/changes_test.go index 1be2a18359..95dc721c66 100644 --- a/db/changes_test.go +++ b/db/changes_test.go @@ -478,14 +478,14 @@ func BenchmarkChangesFeedDocUnmarshalling(b *testing.B) { // Create child rev 1 docBody["child"] = "A" - _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-A", revId}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-A", revId}, false) if err != nil { b.Fatalf("Error creating child1 rev: %v", err) } // Create child rev 2 docBody["child"] = "B" - _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-B", revId}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, docid, docBody, []string{"2-B", revId}, false) if err != nil { b.Fatalf("Error creating child2 rev: %v", err) } diff --git a/db/crud.go b/db/crud.go index fabe70e218..586a9ca03d 100644 --- a/db/crud.go +++ b/db/crud.go @@ -317,7 +317,7 @@ func (db *DatabaseCollectionWithUser) getRev(ctx context.Context, docid, revid s if revid != "" { // 
Get a specific revision body and history from the revision cache // (which will load them if necessary, by calling revCacheLoader, above) - revision, err = db.revisionCache.GetWithRev(ctx, docid, revid, includeBody, RevCacheOmitDelta) + revision, err = db.revisionCache.Get(ctx, docid, revid, includeBody, RevCacheOmitDelta) } else { // No rev ID given, so load active revision revision, err = db.revisionCache.GetActive(ctx, docid, includeBody) @@ -381,7 +381,7 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR return nil, nil, nil } - fromRevision, err := db.revisionCache.GetWithRev(ctx, docID, fromRevID, RevCacheOmitBody, RevCacheIncludeDelta) + fromRevision, err := db.revisionCache.Get(ctx, docID, fromRevID, RevCacheOmitBody, RevCacheIncludeDelta) // If the fromRevision is a removal cache entry (no body), but the user has access to that removal, then just // return 404 missing to indicate that the body of the revision is no longer available. @@ -421,7 +421,7 @@ func (db *DatabaseCollectionWithUser) GetDelta(ctx context.Context, docID, fromR // db.DbStats.StatsDeltaSync().Add(base.StatKeyDeltaCacheMisses, 1) db.dbStats().DeltaSync().DeltaCacheMiss.Add(1) - toRevision, err := db.revisionCache.GetWithRev(ctx, docID, toRevID, RevCacheOmitBody, RevCacheIncludeDelta) + toRevision, err := db.revisionCache.Get(ctx, docID, toRevID, RevCacheOmitBody, RevCacheIncludeDelta) if err != nil { return nil, nil, err } @@ -866,33 +866,6 @@ func (db *DatabaseCollectionWithUser) OnDemandImportForWrite(ctx context.Context return nil } -// updateHLV updates the HLV in the sync data appropriately based on what type of document update event we are encountering -func (db *DatabaseCollectionWithUser) updateHLV(d *Document, docUpdateEvent DocUpdateType) (*Document, error) { - - if d.HLV == nil { - d.HLV = &HybridLogicalVector{} - } - switch docUpdateEvent { - case ExistingVersion: - // preserve any other logic on the HLV that has been done by the client, only 
update to cvCAS will be needed - d.HLV.CurrentVersionCAS = hlvExpandMacroCASValue - case Import: - // work to be done to decide if the VV needs updating here, pending CBG-3503 - case NewVersion, ExistingVersionWithUpdateToHLV: - // add a new entry to the version vector - newVVEntry := CurrentVersionVector{} - newVVEntry.SourceID = db.dbCtx.BucketUUID - newVVEntry.VersionCAS = hlvExpandMacroCASValue - err := d.SyncData.HLV.AddVersion(newVVEntry) - if err != nil { - return nil, err - } - // update the cvCAS on the SGWrite event too - d.HLV.CurrentVersionCAS = hlvExpandMacroCASValue - } - return d, nil -} - // Updates or creates a document. // The new body's BodyRev property must match the current revision's, if any. func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, body Body) (newRevID string, doc *Document, err error) { @@ -932,9 +905,8 @@ func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, bod return "", nil, err } - docUpdateEvent := NewVersion allowImport := db.UseXattrs() - doc, newRevID, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, expiry, nil, docUpdateEvent, nil, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + doc, newRevID, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, expiry, nil, nil, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { var isSgWrite bool var crc32Match bool @@ -1042,8 +1014,8 @@ func (db *DatabaseCollectionWithUser) Put(ctx context.Context, docid string, bod } // Adds an existing revision to a document along with its history (list of rev IDs.) 
-func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, forceAllConflicts bool, existingDoc *sgbucket.BucketDocument, docUpdateEvent DocUpdateType) (doc *Document, newRevID string, err error) { - return db.PutExistingRevWithConflictResolution(ctx, newDoc, docHistory, noConflicts, nil, forceAllConflicts, existingDoc, docUpdateEvent) +func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, forceAllConflicts bool, existingDoc *sgbucket.BucketDocument) (doc *Document, newRevID string, err error) { + return db.PutExistingRevWithConflictResolution(ctx, newDoc, docHistory, noConflicts, nil, forceAllConflicts, existingDoc) } // PutExistingRevWithConflictResolution Adds an existing revision to a document along with its history (list of rev IDs.) @@ -1051,7 +1023,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRev(ctx context.Context, newDoc // 1. If noConflicts == false, the revision will be added to the rev tree as a conflict // 2. If noConflicts == true and a conflictResolverFunc is not provided, a 409 conflict error will be returned // 3. If noConflicts == true and a conflictResolverFunc is provided, conflicts will be resolved and the result added to the document. 
-func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, conflictResolver *ConflictResolver, forceAllowConflictingTombstone bool, existingDoc *sgbucket.BucketDocument, docUpdateEvent DocUpdateType) (doc *Document, newRevID string, err error) { +func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx context.Context, newDoc *Document, docHistory []string, noConflicts bool, conflictResolver *ConflictResolver, forceAllowConflictingTombstone bool, existingDoc *sgbucket.BucketDocument) (doc *Document, newRevID string, err error) { newRev := docHistory[0] generation, _ := ParseRevID(ctx, newRev) if generation < 0 { @@ -1059,7 +1031,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx c } allowImport := db.UseXattrs() - doc, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, newDoc.DocExpiry, nil, docUpdateEvent, existingDoc, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + doc, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, allowImport, newDoc.DocExpiry, nil, existingDoc, func(doc *Document) (resultDoc *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { // (Be careful: this block can be invoked multiple times if there are races!) 
var isSgWrite bool @@ -1158,7 +1130,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithConflictResolution(ctx c return doc, newRev, err } -func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context, docid string, body Body, docHistory []string, noConflicts bool, docUpdateEvent DocUpdateType) (doc *Document, newRev string, err error) { +func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context, docid string, body Body, docHistory []string, noConflicts bool) (doc *Document, newRev string, err error) { err = validateAPIDocUpdate(body) if err != nil { return nil, "", err @@ -1183,7 +1155,7 @@ func (db *DatabaseCollectionWithUser) PutExistingRevWithBody(ctx context.Context newDoc.UpdateBody(body) - doc, newRevID, putExistingRevErr := db.PutExistingRev(ctx, newDoc, docHistory, noConflicts, false, nil, docUpdateEvent) + doc, newRevID, putExistingRevErr := db.PutExistingRev(ctx, newDoc, docHistory, noConflicts, false, nil) if putExistingRevErr != nil { return nil, "", putExistingRevErr @@ -1859,7 +1831,7 @@ type updateAndReturnDocCallback func(*Document) (resultDoc *Document, resultAtta // 1. Receive the updated document body in the response // 2. Specify the existing document body/xattr/cas, to avoid initial retrieval of the doc in cases that the current contents are already known (e.g. import). // On cas failure, the document will still be reloaded from the bucket as usual. 
-func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, docid string, allowImport bool, expiry uint32, opts *sgbucket.MutateInOptions, docUpdateEvent DocUpdateType, existingDoc *sgbucket.BucketDocument, callback updateAndReturnDocCallback) (doc *Document, newRevID string, err error) { +func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, docid string, allowImport bool, expiry uint32, opts *sgbucket.MutateInOptions, existingDoc *sgbucket.BucketDocument, callback updateAndReturnDocCallback) (doc *Document, newRevID string, err error) { key := realDocID(docid) if key == "" { @@ -1958,14 +1930,6 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do return } - // update the HLV values - doc, err = db.updateHLV(doc, docUpdateEvent) - if err != nil { - return - } - // update the mutate in options based on the above logic - updatedSpec = doc.SyncData.HLV.computeMacroExpansions() - deleteDoc = currentRevFromHistory.Deleted // Return the new raw document value for the bucket to store. 
@@ -1986,7 +1950,7 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do // Prior to saving doc, remove the revision in cache if createNewRevIDSkipped { - db.revisionCache.RemoveWithRev(doc.ID, doc.CurrentRev) + db.revisionCache.Remove(doc.ID, doc.CurrentRev) } base.DebugfCtx(ctx, base.KeyCRUD, "Saving doc (seq: #%d, id: %v rev: %v)", doc.Sequence, base.UD(doc.ID), doc.CurrentRev) @@ -2000,8 +1964,6 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do } } else if doc != nil { doc.Cas = casOut - // update the doc's HLV defined post macro expansion - doc = postWriteUpdateHLV(doc, casOut) } } @@ -2059,7 +2021,6 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do Expiry: doc.Expiry, Deleted: doc.History[newRevID].Deleted, _shallowCopyBody: storedDoc.Body(ctx), - CV: &CurrentVersionVector{VersionCAS: doc.HLV.Version, SourceID: doc.HLV.SourceID}, } if createNewRevIDSkipped { @@ -2122,19 +2083,6 @@ func (db *DatabaseCollectionWithUser) updateAndReturnDoc(ctx context.Context, do return doc, newRevID, nil } -func postWriteUpdateHLV(doc *Document, casOut uint64) *Document { - if doc.HLV == nil { - return doc - } - if doc.HLV.Version == hlvExpandMacroCASValue { - doc.HLV.Version = casOut - } - if doc.HLV.CurrentVersionCAS == hlvExpandMacroCASValue { - doc.HLV.CurrentVersionCAS = casOut - } - return doc -} - func getAttachmentIDsForLeafRevisions(ctx context.Context, db *DatabaseCollectionWithUser, doc *Document, newRevID string) (map[string]struct{}, error) { leafAttachments := make(map[string]struct{}) @@ -2631,10 +2579,8 @@ func (db *DatabaseCollectionWithUser) CheckProposedRev(ctx context.Context, doci } const ( - xattrMacroCas = "cas" - xattrMacroValueCrc32c = "value_crc32c" - versionVectorVrsMacro = "_vv.vrs" - versionVectorCVCASMacro = "_vv.cvCas" + xattrMacroCas = "cas" + xattrMacroValueCrc32c = "value_crc32c" ) func macroExpandSpec(xattrName string) 
[]sgbucket.MacroExpansionSpec { @@ -2653,11 +2599,3 @@ func xattrCasPath(xattrKey string) string { func xattrCrc32cPath(xattrKey string) string { return xattrKey + "." + xattrMacroValueCrc32c } - -func xattrCurrentVersionPath(xattrKey string) string { - return xattrKey + "." + versionVectorVrsMacro -} - -func xattrCurrentVersionCASPath(xattrKey string) string { - return xattrKey + "." + versionVectorCVCASMacro -} diff --git a/db/crud_test.go b/db/crud_test.go index 38ca667b3d..ea8f99f355 100644 --- a/db/crud_test.go +++ b/db/crud_test.go @@ -75,7 +75,7 @@ func TestRevisionCacheLoad(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Flush the cache @@ -116,7 +116,7 @@ func TestHasAttachmentsFlag(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -127,7 +127,7 @@ func TestHasAttachmentsFlag(t *testing.T) { rev2a_body := unjson(`{"_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -153,7 +153,7 @@ func 
TestHasAttachmentsFlag(t *testing.T) { rev2b_body := unjson(`{"_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -251,7 +251,7 @@ func TestHasAttachmentsFlagForLegacyAttachments(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create rev 2-a with legacy attachment. 
@@ -280,7 +280,7 @@ func TestHasAttachmentsFlagForLegacyAttachments(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -315,7 +315,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -326,7 +326,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2a_body := Body{} rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -345,7 +345,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ 
-388,7 +388,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3b_body := Body{} rev3b_body["version"] = "3b" rev3b_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false) rev3b_body[BodyId] = doc.ID rev3b_body[BodyRev] = newRev rev3b_body[BodyDeleted] = true @@ -425,7 +425,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev2c_body := Body{} rev2c_body["key1"] = prop_1000_bytes rev2c_body["version"] = "2c" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"2-c", "1-a"}, false) rev2c_body[BodyId] = doc.ID rev2c_body[BodyRev] = newRev assert.NoError(t, err, "add 2-c") @@ -447,7 +447,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3c_body["version"] = "3c" rev3c_body["key1"] = prop_1000_bytes rev3c_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-c"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-c"}, false) rev3c_body[BodyId] = doc.ID rev3c_body[BodyRev] = newRev rev3c_body[BodyDeleted] = true @@ -476,7 +476,7 @@ func TestRevisionStorageConflictAndTombstones(t *testing.T) { rev3a_body := Body{} rev3a_body["key1"] = prop_1000_bytes rev3a_body["version"] = "3a" - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2c_body, []string{"3-a", "2-a"}, false) assert.NoError(t, err, "add 3-a") revTree, err = 
getRevTreeList(ctx, collection.dataStore, "doc1", db.UseXattrs()) @@ -499,7 +499,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { // Create rev 2-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -510,7 +510,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev2a_body := Body{} rev2a_body["key1"] = prop_1000_bytes rev2a_body["version"] = "2a" - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -529,7 +529,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev2b_body := Body{} rev2b_body["key1"] = prop_1000_bytes rev2b_body["version"] = "2b" - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -574,7 +574,7 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { rev3b_body["version"] = "3b" rev3b_body["key1"] = prop_1000_bytes rev3b_body[BodyDeleted] = true - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b"}, false) rev3b_body[BodyId] = doc.ID rev3b_body[BodyRev] = 
newRev rev3b_body[BodyDeleted] = true @@ -609,17 +609,17 @@ func TestRevisionStoragePruneTombstone(t *testing.T) { activeRevBody := Body{} activeRevBody["version"] = "...a" activeRevBody["key1"] = prop_1000_bytes - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"3-a", "2-a"}, false) assert.NoError(t, err, "add 3-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"4-a", "3-a"}, false) assert.NoError(t, err, "add 4-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"5-a", "4-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"5-a", "4-a"}, false) assert.NoError(t, err, "add 5-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"6-a", "5-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"6-a", "5-a"}, false) assert.NoError(t, err, "add 6-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"7-a", "6-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"7-a", "6-a"}, false) assert.NoError(t, err, "add 7-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"8-a", "7-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"8-a", "7-a"}, false) assert.NoError(t, err, "add 8-a") // Verify that 3-b is still present at this point @@ -628,7 +628,7 @@ func 
TestRevisionStoragePruneTombstone(t *testing.T) { assert.NoError(t, err, "Rev 3-b should still exist") // Add one more rev that triggers pruning since gen(9-3) > revsLimit - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"9-a", "8-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", activeRevBody, []string{"9-a", "8-a"}, false) assert.NoError(t, err, "add 9-a") // Verify that 3-b has been pruned @@ -657,7 +657,7 @@ func TestOldRevisionStorage(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "version": "1a", "large": prop_1000_bytes} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) require.NoError(t, err, "add 1-a") // Create rev 2-a @@ -666,7 +666,7 @@ func TestOldRevisionStorage(t *testing.T) { // 2-a log.Printf("Create rev 2-a") rev2a_body := Body{"key1": "value2", "version": "2a", "large": prop_1000_bytes} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev @@ -686,7 +686,7 @@ func TestOldRevisionStorage(t *testing.T) { // 3-a log.Printf("Create rev 3-a") rev3a_body := Body{"key1": "value2", "version": "3a", "large": prop_1000_bytes} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false) require.NoError(t, err, "add 3-a") rev3a_body[BodyId] = doc.ID rev3a_body[BodyRev] 
= newRev @@ -705,7 +705,7 @@ func TestOldRevisionStorage(t *testing.T) { // 3-a log.Printf("Create rev 2-b") rev2b_body := Body{"key1": "value2", "version": "2b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) require.NoError(t, err, "add 2-b") // Retrieve the document: @@ -728,7 +728,7 @@ func TestOldRevisionStorage(t *testing.T) { // 6-a log.Printf("Create rev 6-a") rev6a_body := Body{"key1": "value2", "version": "6a", "large": prop_1000_bytes} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false) require.NoError(t, err, "add 6-a") rev6a_body[BodyId] = doc.ID rev6a_body[BodyRev] = newRev @@ -753,7 +753,7 @@ func TestOldRevisionStorage(t *testing.T) { // 6-a log.Printf("Create rev 3-b") rev3b_body := Body{"key1": "value2", "version": "3b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false) require.NoError(t, err, "add 3-b") // Same again and again @@ -772,12 +772,12 @@ func TestOldRevisionStorage(t *testing.T) { log.Printf("Create rev 3-c") rev3c_body := Body{"key1": "value2", "version": "3c", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) require.NoError(t, err, "add 
3-c") log.Printf("Create rev 3-d") rev3d_body := Body{"key1": "value2", "version": "3d", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3d_body, []string{"3-d", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3d_body, []string{"3-d", "2-b", "1-a"}, false) require.NoError(t, err, "add 3-d") // Create new winning revision on 'b' branch. Triggers movement of 6-a to inline storage. Force cas retry, check document contents @@ -796,7 +796,7 @@ func TestOldRevisionStorage(t *testing.T) { // 7-b log.Printf("Create rev 7-b") rev7b_body := Body{"key1": "value2", "version": "7b", "large": prop_1000_bytes} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev7b_body, []string{"7-b", "6-b", "5-b", "4-b", "3-b"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev7b_body, []string{"7-b", "6-b", "5-b", "4-b", "3-b"}, false) require.NoError(t, err, "add 7-b") } @@ -817,7 +817,7 @@ func TestOldRevisionStorageError(t *testing.T) { // Create rev 1-a log.Printf("Create rev 1-a") body := Body{"key1": "value1", "v": "1a"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create rev 2-a @@ -826,7 +826,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 2-a log.Printf("Create rev 2-a") rev2a_body := Body{"key1": "value2", "v": "2a"} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", rev2a_body, []string{"2-a", "1-a"}, false) rev2a_body[BodyId] = doc.ID rev2a_body[BodyRev] = newRev assert.NoError(t, err, "add 2-a") @@ -845,7 +845,7 @@ func 
TestOldRevisionStorageError(t *testing.T) { // 3-a log.Printf("Create rev 3-a") rev3a_body := Body{"key1": "value2", "v": "3a"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3a_body, []string{"3-a", "2-a", "1-a"}, false) rev3a_body[BodyId] = doc.ID rev3a_body[BodyRev] = newRev assert.NoError(t, err, "add 3-a") @@ -858,7 +858,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 3-a log.Printf("Create rev 2-b") rev2b_body := Body{"key1": "value2", "v": "2b"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev2b_body, []string{"2-b", "1-a"}, false) rev2b_body[BodyId] = doc.ID rev2b_body[BodyRev] = newRev assert.NoError(t, err, "add 2-b") @@ -883,7 +883,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 6-a log.Printf("Create rev 6-a") rev6a_body := Body{"key1": "value2", "v": "6a"} - doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err = collection.PutExistingRevWithBody(ctx, "doc1", rev6a_body, []string{"6-a", "5-a", "4-a", "3-a"}, false) rev6a_body[BodyId] = doc.ID rev6a_body[BodyRev] = newRev assert.NoError(t, err, "add 6-a") @@ -909,7 +909,7 @@ func TestOldRevisionStorageError(t *testing.T) { // 6-a log.Printf("Create rev 3-b") rev3b_body := Body{"key1": "value2", "v": "3b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3b_body, []string{"3-b", "2-b", "1-a"}, false) assert.NoError(t, err, "add 3-b") // Same again @@ -929,7 +929,7 @@ func 
TestOldRevisionStorageError(t *testing.T) { log.Printf("Create rev 3-c") rev3c_body := Body{"key1": "value2", "v": "3c"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) assert.NoError(t, err, "add 3-c") } @@ -946,7 +946,7 @@ func TestLargeSequence(t *testing.T) { // Write a doc via SG body := Body{"key1": "largeSeqTest"} - _, _, err := collection.PutExistingRevWithBody(ctx, "largeSeqDoc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "largeSeqDoc", body, []string{"1-a"}, false) assert.NoError(t, err, "add largeSeqDoc") syncData, err := collection.GetDocSyncData(ctx, "largeSeqDoc") @@ -1021,7 +1021,7 @@ func TestMalformedRevisionStorageRecovery(t *testing.T) { // 6-a log.Printf("Attempt to create rev 3-c") rev3c_body := Body{"key1": "value2", "v": "3c"} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", rev3c_body, []string{"3-c", "2-b", "1-a"}, false) assert.NoError(t, err, "add 3-c") } @@ -1033,16 +1033,16 @@ func BenchmarkDatabaseGet1xRev(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar", "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) largeDoc := make([]byte, 1000000) longBody := Body{"val": string(largeDoc), "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, 
[]string{"1-a"}, false) var shortWithAttachmentsDataBody Body shortWithAttachmentsData := `{"test": true, "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}, "rev":"1-a"}` _ = base.JSONUnmarshal([]byte(shortWithAttachmentsData), &shortWithAttachmentsDataBody) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false) b.Run("ShortLatest", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1061,9 +1061,9 @@ func BenchmarkDatabaseGet1xRev(b *testing.B) { }) updateBody := Body{"rev": "2-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false) b.Run("ShortOld", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1090,16 +1090,16 @@ func BenchmarkDatabaseGetRev(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar", "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) largeDoc := make([]byte, 1000000) longBody := Body{"val": string(largeDoc), "rev": "1-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", 
longBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", longBody, []string{"1-a"}, false) var shortWithAttachmentsDataBody Body shortWithAttachmentsData := `{"test": true, "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}, "rev":"1-a"}` _ = base.JSONUnmarshal([]byte(shortWithAttachmentsData), &shortWithAttachmentsDataBody) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", shortWithAttachmentsDataBody, []string{"1-a"}, false) b.Run("ShortLatest", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1118,9 +1118,9 @@ func BenchmarkDatabaseGetRev(b *testing.B) { }) updateBody := Body{"rev": "2-a"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc2", updateBody, []string{"2-a", "1-a"}, false) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc3", updateBody, []string{"2-a", "1-a"}, false) b.Run("ShortOld", func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -1148,7 +1148,7 @@ func BenchmarkHandleRevDelta(b *testing.B) { collection := GetSingleDatabaseCollectionWithUser(b, db) body := Body{"foo": "bar"} - _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, _ = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) getDelta := func(newDoc 
*Document) { deltaSrcRev, _ := collection.GetRev(ctx, "doc1", "1-a", false, nil) @@ -1197,18 +1197,18 @@ func TestGetAvailableRevAttachments(t *testing.T) { // Create the very first revision of the document with attachment; let's call this as rev 1-a payload := `{"sku":"6213100","_attachments":{"camera.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - _, rev, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, rev, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"1-a"}, false) assert.NoError(t, err, "Couldn't create document") ancestor := rev // Ancestor revision // Create the second revision of the document with attachment reference; payload = `{"sku":"6213101","_attachments":{"camera.txt":{"stub":true,"revpos":1}}}` - _, rev, err = collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, rev, err = collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"2-a", "1-a"}, false) parent := rev // Immediate ancestor or parent revision assert.NoError(t, err, "Couldn't create document") payload = `{"sku":"6213102","_attachments":{"camera.txt":{"stub":true,"revpos":1}}}` - doc, _, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + doc, _, err := collection.PutExistingRevWithBody(ctx, "camera", unjson(payload), []string{"3-a", "2-a"}, false) assert.NoError(t, err, "Couldn't create document") // Get available attachments by immediate ancestor revision or parent revision @@ -1235,11 +1235,11 @@ func TestGet1xRevAndChannels(t *testing.T) { docId := "dd6d2dcc679d12b9430a9787bab45b33" payload := `{"sku":"6213100","_attachments":{"camera.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - doc1, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, 
false, ExistingVersionWithUpdateToHLV) + doc1, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false) assert.NoError(t, err, "Couldn't create document") payload = `{"sku":"6213101","_attachments":{"lens.txt":{"data":"Q2Fub24gRU9TIDVEIE1hcmsgSVY="}}}` - doc2, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc2, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Couldn't create document") // Get the 1x revision from document with list revision enabled @@ -1298,7 +1298,7 @@ func TestGet1xRevFromDoc(t *testing.T) { // Create the first revision of the document docId := "356779a9a1696714480f57fa3fb66d4c" payload := `{"city":"Los Angeles"}` - doc, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, rev1, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"1-a"}, false) assert.NoError(t, err, "Couldn't create document") assert.NotEmpty(t, doc, "Document shouldn't be empty") assert.Equal(t, "1-a", rev1, "Provided input revision ID should be returned") @@ -1321,7 +1321,7 @@ func TestGet1xRevFromDoc(t *testing.T) { // Create the second revision of the document payload = `{"city":"Hollywood"}` - doc, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, rev2, err := collection.PutExistingRevWithBody(ctx, docId, unjson(payload), []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Couldn't create document") assert.NotEmpty(t, doc, "Document shouldn't be empty") assert.Equal(t, "2-a", rev2, "Provided input revision ID should be returned") diff --git a/db/database.go b/db/database.go index 036369f12b..7c1909a547 100644 --- a/db/database.go +++ b/db/database.go 
@@ -48,15 +48,6 @@ const ( DBCompactRunning ) -const ( - Import DocUpdateType = iota - NewVersion - ExistingVersion - ExistingVersionWithUpdateToHLV -) - -type DocUpdateType uint32 - const ( DefaultRevsLimitNoConflicts = 50 DefaultRevsLimitConflicts = 100 @@ -97,7 +88,6 @@ type DatabaseContext struct { MetadataStore base.DataStore // Storage for database metadata (anything that isn't an end-user's/customer's documents) Bucket base.Bucket // Storage BucketSpec base.BucketSpec // The BucketSpec - BucketUUID string // The bucket UUID for the bucket the database is created against BucketLock sync.RWMutex // Control Access to the underlying bucket object mutationListener changeListener // Caching feed listener ImportListener *importListener // Import feed listener @@ -406,11 +396,6 @@ func NewDatabaseContext(ctx context.Context, dbName string, bucket base.Bucket, metadataStore = bucket.DefaultDataStore() } - bucketUUID, err := bucket.UUID() - if err != nil { - return nil, err - } - // Register the cbgt pindex type for the configGroup RegisterImportPindexImpl(ctx, options.GroupID) @@ -419,7 +404,6 @@ func NewDatabaseContext(ctx context.Context, dbName string, bucket base.Bucket, UUID: cbgt.NewUUID(), MetadataStore: metadataStore, Bucket: bucket, - BucketUUID: bucketUUID, StartTime: time.Now(), autoImport: autoImport, Options: options, diff --git a/db/database_test.go b/db/database_test.go index f60d87d24b..f34123e791 100644 --- a/db/database_test.go +++ b/db/database_test.go @@ -294,7 +294,7 @@ func TestDatabase(t *testing.T) { body["key2"] = int64(4444) history := []string{"4-four", "3-three", "2-488724414d0ed6b398d6d2aeb228d797", "1-cb0c9a22be0e5a1b01084ec019defa81"} - doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", body, history, false, ExistingVersionWithUpdateToHLV) + doc, newRev, err := collection.PutExistingRevWithBody(ctx, "doc1", body, history, false) body[BodyId] = doc.ID body[BodyRev] = newRev assert.NoError(t, err, "PutExistingRev failed") 
@@ -1020,18 +1020,18 @@ func TestRepeatedConflict(t *testing.T) { // Create rev 1 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 body["channels"] = []string{"all", "2b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") body["n"] = 3 body["channels"] = []string{"all", "2a"} - _, newRev, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, newRev, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") // Get the _rev that was set in the body by PutExistingRevWithBody() and make assertions on it @@ -1040,7 +1040,7 @@ func TestRepeatedConflict(t *testing.T) { // Remove the _rev key from the body, and call PutExistingRevWithBody() again, which should re-add it delete(body, BodyRev) - _, newRev, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, newRev, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err) // The _rev should pass the same assertions as before, since PutExistingRevWithBody() should re-add it @@ -1068,7 +1068,7 @@ func TestConflicts(t *testing.T) { // Create rev 1 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = 
collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Wait for rev to be cached @@ -1081,11 +1081,11 @@ func TestConflicts(t *testing.T) { // Create two conflicting changes: body["n"] = 2 body["channels"] = []string{"all", "2b"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") body["n"] = 3 body["channels"] = []string{"all", "2a"} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") cacheWaiter.Add(2) @@ -1213,55 +1213,55 @@ func TestNoConflictsMode(t *testing.T) { // Create revs 1 and 2 of "doc": body := Body{"n": 1, "channels": []string{"all", "1"}} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") // Try to create a conflict branching from rev 1: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-b", "1-a"}, false) assertHTTPError(t, err, 409) // Try to create a conflict with no common ancestor: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-c", 
"1-c"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-c", "1-c"}, false) assertHTTPError(t, err, 409) // Try to create a conflict with a longer history: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-d", "3-d", "2-d", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-d", "3-d", "2-d", "1-a"}, false) assertHTTPError(t, err, 409) // Try to create a conflict with no history: - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-e"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-e"}, false) assertHTTPError(t, err, 409) // Create a non-conflict with a longer history, ending in a deletion: body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "add 4-a") delete(body, BodyDeleted) // Try to resurrect the document with a conflicting branch - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-f", "3-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"4-f", "3-a"}, false) assertHTTPError(t, err, 409) // Resurrect the tombstoned document with a disconnected branch): - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-f"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"1-f"}, false) assert.NoError(t, err, "add 1-f") // Tombstone the resurrected branch body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-f", "1-f"}, false, 
ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"2-f", "1-f"}, false) assert.NoError(t, err, "add 2-f") delete(body, BodyDeleted) // Resurrect the tombstoned document with a valid history (descendents of leaf) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-f", "4-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc", body, []string{"5-f", "4-a"}, false) assert.NoError(t, err, "add 5-f") delete(body, BodyDeleted) // Create a new document with a longer history: - _, _, err = collection.PutExistingRevWithBody(ctx, "COD", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "COD", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "add COD") delete(body, BodyDeleted) @@ -1289,34 +1289,34 @@ func TestAllowConflictsFalseTombstoneExistingConflict(t *testing.T) { // Create documents with multiple non-deleted branches log.Printf("Creating docs") body := Body{"n": 1} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", 
"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") body["n"] = 3 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") // Set AllowConflicts to false db.Options.AllowConflicts = base.BoolPtr(false) // Attempt to tombstone a non-leaf node of a conflicted document - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false) assert.True(t, err != nil, "expected error tombstoning non-leaf") // Tombstone the non-winning branch of a conflicted 
document @@ -1366,27 +1366,27 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Create documents with multiple non-deleted branches log.Printf("Creating docs") body := Body{"n": 1} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"1-a"}, false) assert.NoError(t, err, "add 1-a") // Create two conflicting changes: body["n"] = 2 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "add 2-b") body["n"] = 3 - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, 
"doc1", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "add 2-a") // Set AllowConflicts to false @@ -1395,12 +1395,12 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Attempt to tombstone a non-leaf node of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-c", "1-a"}, false) assert.True(t, err != nil, "expected error tombstoning non-leaf") // Tombstone the non-winning branch of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a"}, false) assert.NoError(t, err, "add 3-a (tombstone)") doc, err := collection.GetDocument(ctx, "doc1", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1408,7 +1408,7 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Tombstone the winning branch of a conflicted document body[BodyDeleted] = true - _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc2", body, []string{"3-b", 
"2-b"}, false) assert.NoError(t, err, "add 3-b (tombstone)") doc, err = collection.GetDocument(ctx, "doc2", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1417,7 +1417,7 @@ func TestAllowConflictsFalseTombstoneExistingConflictNewEditsFalse(t *testing.T) // Set revs_limit=1, then tombstone non-winning branch of a conflicted document. Validate retrieval still works. body[BodyDeleted] = true db.RevsLimit = uint32(1) - _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc3", body, []string{"3-a", "2-a"}, false) assert.NoError(t, err, "add 3-a (tombstone)") doc, err = collection.GetDocument(ctx, "doc3", DocUnmarshalAll) assert.NoError(t, err, "Retrieve doc post-tombstone") @@ -1453,7 +1453,7 @@ func TestSyncFnOnPush(t *testing.T) { body["channels"] = "clibup" history := []string{"4-four", "3-three", "2-488724414d0ed6b398d6d2aeb228d797", rev1id} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, history, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, history, false) assert.NoError(t, err, "PutExistingRev failed") // Check that the doc has the correct channel (test for issue #300) @@ -2145,7 +2145,7 @@ func TestConcurrentPushSameNewNonWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Emily", "age": 20} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false) assert.NoError(t, err, "Adding revision 3-b") } } @@ -2160,29 +2160,29 @@ func TestConcurrentPushSameNewNonWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := Body{"name": 
"Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Charlie", "age": 10} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 4-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-b") enableCallback = true body = Body{"name": "Emily", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b", "2-b", "1-a"}, false) assert.NoError(t, err, "Adding revision 3-b") doc, err := collection.GetDocument(ctx, "doc1", DocUnmarshalAll) 
@@ -2203,7 +2203,7 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Charlie", "age": 10, BodyDeleted: true} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Couldn't add revision 4-a (tombstone)") } } @@ -2218,19 +2218,19 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := Body{"name": "Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-b") doc, err := 
collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2240,7 +2240,7 @@ func TestConcurrentPushSameTombstoneWinningRevision(t *testing.T) { enableCallback = true body = Body{"name": "Charlie", "age": 10, BodyDeleted: true} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Couldn't add revision 4-a (tombstone)") doc, err = collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2261,7 +2261,7 @@ func TestConcurrentPushDifferentUpdateNonWinningRevision(t *testing.T) { enableCallback = false body := Body{"name": "Joshua", "age": 11} collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b1", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b1", "2-b", "1-a"}, false) assert.NoError(t, err, "Couldn't add revision 3-b1") } } @@ -2276,29 +2276,29 @@ func TestConcurrentPushDifferentUpdateNonWinningRevision(t *testing.T) { collection := GetSingleDatabaseCollectionWithUser(t, db) body := Body{"name": "Olivia", "age": 80} - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"1-a"}, false) assert.NoError(t, err, "Adding revision 1-a") body = Body{"name": "Harry", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-a") body = Body{"name": "Amelia", "age": 20} - _, _, err = collection.PutExistingRevWithBody(ctx, 
"doc1", body, []string{"3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 3-a") body = Body{"name": "Charlie", "age": 10} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"4-a", "3-a", "2-a", "1-a"}, false) assert.NoError(t, err, "Adding revision 4-a") body = Body{"name": "Noah", "age": 40} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-b", "1-a"}, false) assert.NoError(t, err, "Adding revision 2-b") enableCallback = true body = Body{"name": "Liam", "age": 12} - _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b2", "2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-b2", "2-b", "1-a"}, false) assert.NoError(t, err, "Couldn't add revision 3-b2") doc, err := collection.GetDocument(ctx, "doc1", DocUnmarshalAll) @@ -2332,7 +2332,7 @@ func TestIncreasingRecentSequences(t *testing.T) { enableCallback = false // Write a doc collection := GetSingleDatabaseCollectionWithUser(t, db) - _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-abc", revid}, true, ExistingVersionWithUpdateToHLV) + _, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"2-abc", revid}, true) assert.NoError(t, err) } } @@ -2349,7 +2349,7 @@ func TestIncreasingRecentSequences(t *testing.T) { assert.NoError(t, err) enableCallback = true - doc, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-abc", "2-abc", revid}, true, 
ExistingVersionWithUpdateToHLV) + doc, _, err := collection.PutExistingRevWithBody(ctx, "doc1", body, []string{"3-abc", "2-abc", revid}, true) assert.NoError(t, err) assert.True(t, sort.IsSorted(base.SortedUint64Slice(doc.SyncData.RecentSequences))) @@ -2797,62 +2797,72 @@ func Test_invalidateAllPrincipalsCache(t *testing.T) { } func Test_resyncDocument(t *testing.T) { - if !base.TestUseXattrs() { - t.Skip("Walrus doesn't support xattr") + testCases := []struct { + useXattr bool + }{ + {useXattr: true}, + {useXattr: false}, } - db, ctx := setupTestDB(t) - defer db.Close(ctx) - db.Options.EnableXattr = true - db.Options.QueryPaginationLimit = 100 - collection := GetSingleDatabaseCollectionWithUser(t, db) + for _, testCase := range testCases { + t.Run(fmt.Sprintf("Test_resyncDocument with useXattr: %t", testCase.useXattr), func(t *testing.T) { + if !base.TestUseXattrs() && testCase.useXattr { + t.Skip("Don't run xattr tests on non xattr tests") + } + db, ctx := setupTestDB(t) + defer db.Close(ctx) - syncFn := ` + db.Options.EnableXattr = testCase.useXattr + db.Options.QueryPaginationLimit = 100 + collection := GetSingleDatabaseCollectionWithUser(t, db) + + syncFn := ` function sync(doc, oldDoc){ channel("channel." + "ABC"); } ` - _, err := collection.UpdateSyncFun(ctx, syncFn) - require.NoError(t, err) + _, err := collection.UpdateSyncFun(ctx, syncFn) + require.NoError(t, err) - docID := uuid.NewString() + docID := uuid.NewString() - updateBody := make(map[string]interface{}) - updateBody["val"] = "value" - _, doc, err := collection.Put(ctx, docID, updateBody) - require.NoError(t, err) - assert.NotNil(t, doc) + updateBody := make(map[string]interface{}) + updateBody["val"] = "value" + _, doc, err := collection.Put(ctx, docID, updateBody) + require.NoError(t, err) + assert.NotNil(t, doc) - syncFn = ` + syncFn = ` function sync(doc, oldDoc){ channel("channel." 
+ "ABC12332423234"); } ` - _, err = collection.UpdateSyncFun(ctx, syncFn) - require.NoError(t, err) - - _, _, err = collection.resyncDocument(ctx, docID, realDocID(docID), false, []uint64{10}) - require.NoError(t, err) - err = collection.WaitForPendingChanges(ctx) - require.NoError(t, err) + _, err = collection.UpdateSyncFun(ctx, syncFn) + require.NoError(t, err) - syncData, err := collection.GetDocSyncData(ctx, docID) - assert.NoError(t, err) + _, _, err = collection.resyncDocument(ctx, docID, realDocID(docID), false, []uint64{10}) + require.NoError(t, err) + err = collection.WaitForPendingChanges(ctx) + require.NoError(t, err) - assert.Len(t, syncData.ChannelSet, 2) - assert.Len(t, syncData.Channels, 2) - found := false + syncData, err := collection.GetDocSyncData(ctx, docID) + assert.NoError(t, err) - for _, chSet := range syncData.ChannelSet { - if chSet.Name == "channel.ABC12332423234" { - found = true - break - } - } + assert.Len(t, syncData.ChannelSet, 2) + assert.Len(t, syncData.Channels, 2) + found := false - assert.True(t, found) - assert.Equal(t, 2, int(db.DbStats.Database().SyncFunctionCount.Value())) + for _, chSet := range syncData.ChannelSet { + if chSet.Name == "channel.ABC12332423234" { + found = true + break + } + } + assert.True(t, found) + assert.Equal(t, 2, int(db.DbStats.Database().SyncFunctionCount.Value())) + }) + } } func Test_getUpdatedDocument(t *testing.T) { diff --git a/db/document.go b/db/document.go index 94fc62ad48..83d67f5389 100644 --- a/db/document.go +++ b/db/document.go @@ -41,7 +41,6 @@ const ( DocUnmarshalHistory // Unmarshals history + rev + CAS only DocUnmarshalRev // Unmarshals rev + CAS only DocUnmarshalCAS // Unmarshals CAS (for import check) only - DocUnmarshalVV // Unmarshals Version Vector only DocUnmarshalNone // No unmarshalling (skips import/upgrade check) ) @@ -65,24 +64,23 @@ type ChannelSetEntry struct { // The sync-gateway metadata stored in the "_sync" property of a Couchbase document. 
type SyncData struct { - CurrentRev string `json:"rev"` - NewestRev string `json:"new_rev,omitempty"` // Newest rev, if different from CurrentRev - Flags uint8 `json:"flags,omitempty"` - Sequence uint64 `json:"sequence,omitempty"` - UnusedSequences []uint64 `json:"unused_sequences,omitempty"` // unused sequences due to update conflicts/CAS retry - RecentSequences []uint64 `json:"recent_sequences,omitempty"` // recent sequences for this doc - used in server dedup handling - Channels channels.ChannelMap `json:"channels,omitempty"` - Access UserAccessMap `json:"access,omitempty"` - RoleAccess UserAccessMap `json:"role_access,omitempty"` - Expiry *time.Time `json:"exp,omitempty"` // Document expiry. Information only - actual expiry/delete handling is done by bucket storage. Needs to be pointer for omitempty to work (see https://github.com/golang/go/issues/4357) - Cas string `json:"cas"` // String representation of a cas value, populated via macro expansion - Crc32c string `json:"value_crc32c"` // String representation of crc32c hash of doc body, populated via macro expansion - Crc32cUserXattr string `json:"user_xattr_value_crc32c,omitempty"` // String representation of crc32c hash of user xattr - TombstonedAt int64 `json:"tombstoned_at,omitempty"` // Time the document was tombstoned. 
Used for view compaction - Attachments AttachmentsMeta `json:"attachments,omitempty"` - ChannelSet []ChannelSetEntry `json:"channel_set"` - ChannelSetHistory []ChannelSetEntry `json:"channel_set_history"` - HLV *HybridLogicalVector `json:"_vv,omitempty"` + CurrentRev string `json:"rev"` + NewestRev string `json:"new_rev,omitempty"` // Newest rev, if different from CurrentRev + Flags uint8 `json:"flags,omitempty"` + Sequence uint64 `json:"sequence,omitempty"` + UnusedSequences []uint64 `json:"unused_sequences,omitempty"` // unused sequences due to update conflicts/CAS retry + RecentSequences []uint64 `json:"recent_sequences,omitempty"` // recent sequences for this doc - used in server dedup handling + Channels channels.ChannelMap `json:"channels,omitempty"` + Access UserAccessMap `json:"access,omitempty"` + RoleAccess UserAccessMap `json:"role_access,omitempty"` + Expiry *time.Time `json:"exp,omitempty"` // Document expiry. Information only - actual expiry/delete handling is done by bucket storage. Needs to be pointer for omitempty to work (see https://github.com/golang/go/issues/4357) + Cas string `json:"cas"` // String representation of a cas value, populated via macro expansion + Crc32c string `json:"value_crc32c"` // String representation of crc32c hash of doc body, populated via macro expansion + Crc32cUserXattr string `json:"user_xattr_value_crc32c,omitempty"` // String representation of crc32c hash of user xattr + TombstonedAt int64 `json:"tombstoned_at,omitempty"` // Time the document was tombstoned. Used for view compaction + Attachments AttachmentsMeta `json:"attachments,omitempty"` + ChannelSet []ChannelSetEntry `json:"channel_set"` + ChannelSetHistory []ChannelSetEntry `json:"channel_set_history"` // Only used for performance metrics: TimeSaved time.Time `json:"time_saved,omitempty"` // Timestamp of save. 
@@ -177,12 +175,11 @@ type Document struct { Cas uint64 // Document cas rawUserXattr []byte // Raw user xattr as retrieved from the bucket - Deleted bool - DocExpiry uint32 - RevID string - DocAttachments AttachmentsMeta - inlineSyncData bool - currentRevChannels base.Set // A base.Set of the current revision's channels (determined by SyncData.Channels at UnmarshalJSON time) + Deleted bool + DocExpiry uint32 + RevID string + DocAttachments AttachmentsMeta + inlineSyncData bool } type historyOnlySyncData struct { @@ -970,7 +967,6 @@ func (doc *Document) updateChannels(ctx context.Context, newChannels base.Set) ( doc.updateChannelHistory(channel, doc.Sequence, true) } } - doc.currentRevChannels = newChannels if changed != nil { base.InfofCtx(ctx, base.KeyCRUD, "\tDoc %q / %q in channels %q", base.UD(doc.ID), doc.CurrentRev, base.UD(newChannels)) changedChannels, err = channels.SetFromArray(changed, channels.KeepStar) @@ -1080,17 +1076,6 @@ func (doc *Document) UnmarshalJSON(data []byte) error { doc.SyncData = *syncData.SyncData } - // determine current revision's channels and store in-memory (avoids doc.Channels iteration at access-check time) - if len(doc.Channels) > 0 { - ch := base.SetOf() - for channelName, channelRemoval := range doc.Channels { - if channelRemoval == nil || channelRemoval.Seq == 0 { - ch.Add(channelName) - } - } - doc.currentRevChannels = ch - } - // Unmarshal the rest of the doc body as map[string]interface{} if err := doc._body.Unmarshal(data); err != nil { return pkgerrors.WithStack(base.RedactErrorf("Failed to UnmarshalJSON() doc with id: %s. 
Error: %v", base.UD(doc.ID), err)) @@ -1145,6 +1130,7 @@ func (doc *Document) UnmarshalWithXattr(ctx context.Context, data []byte, xdata if unmarshalLevel == DocUnmarshalAll && len(data) > 0 { return doc._body.Unmarshal(data) } + case DocUnmarshalNoHistory: // Unmarshal sync metadata only, excluding history doc.SyncData = SyncData{} @@ -1188,14 +1174,6 @@ func (doc *Document) UnmarshalWithXattr(ctx context.Context, data []byte, xdata Cas: casOnlyMeta.Cas, } doc._rawBody = data - case DocUnmarshalVV: - tmpData := SyncData{} - unmarshalErr := base.JSONUnmarshal(xdata, &tmpData) - if unmarshalErr != nil { - return base.RedactErrorf("Failed to UnmarshalWithXattr() doc with id: %s (DocUnmarshalVV). Error: %w", base.UD(doc.ID), unmarshalErr) - } - doc.SyncData.HLV = tmpData.HLV - doc._rawBody = data } // If there's no body, but there is an xattr, set deleted flag and initialize an empty body @@ -1237,17 +1215,3 @@ func (doc *Document) MarshalWithXattr() (data []byte, xdata []byte, err error) { return data, xdata, nil } - -// HasCurrentVersion Compares the specified CV with the fetched documents CV, returns error on mismatch between the two -func (d *Document) HasCurrentVersion(cv CurrentVersionVector) error { - if d.HLV == nil { - return base.RedactErrorf("no HLV present in fetched doc %s", base.UD(d.ID)) - } - - // fetch the current version for the loaded doc and compare against the CV specified in the IDandCV key - fetchedDocSource, fetchedDocVersion := d.HLV.GetCurrentVersion() - if fetchedDocSource != cv.SourceID || fetchedDocVersion != cv.VersionCAS { - return base.RedactErrorf("mismatch between specified current version and fetched document current version for doc %s", base.UD(d.ID)) - } - return nil -} diff --git a/db/document_test.go b/db/document_test.go index 6301e99ec3..16fbd97ff4 100644 --- a/db/document_test.go +++ b/db/document_test.go @@ -14,7 +14,6 @@ import ( "bytes" "encoding/binary" "log" - "reflect" "testing" "github.com/couchbase/sync_gateway/base" 
@@ -191,106 +190,6 @@ func BenchmarkUnmarshalBody(b *testing.B) { } } -const doc_meta_with_vv = `{ - "rev": "3-89758294abc63157354c2b08547c2d21", - "sequence": 7, - "recent_sequences": [ - 5, - 6, - 7 - ], - "history": { - "revs": [ - "1-fc591a068c153d6c3d26023d0d93dcc1", - "2-0eab03571bc55510c8fc4bfac9fe4412", - "3-89758294abc63157354c2b08547c2d21" - ], - "parents": [ - -1, - 0, - 1 - ], - "channels": [ - [ - "ABC", - "DEF" - ], - [ - "ABC", - "DEF", - "GHI" - ], - [ - "ABC", - "GHI" - ] - ] - }, - "channels": { - "ABC": null, - "DEF": { - "seq": 7, - "rev": "3-89758294abc63157354c2b08547c2d21" - }, - "GHI": null - }, - "_vv":{ - "cvCas":"0x40e2010000000000", - "src":"cb06dc003846116d9b66d2ab23887a96", - "vrs":"0x40e2010000000000", - "mv":{ - "s_LhRPsa7CpjEvP5zeXTXEBA":"c0ff05d7ac059a16", - "s_NqiIe0LekFPLeX4JvTO6Iw":"1c008cd6ac059a16" - }, - "pv":{ - "s_YZvBpEaztom9z5V/hDoeIw":"f0ff44d6ac059a16" - } - }, - "cas": "", - "time_saved": "2017-10-25T12:45:29.622450174-07:00" - }` - -func TestParseVersionVectorSyncData(t *testing.T) { - mv := make(map[string]uint64) - pv := make(map[string]uint64) - mv["s_LhRPsa7CpjEvP5zeXTXEBA"] = 1628620455147864000 - mv["s_NqiIe0LekFPLeX4JvTO6Iw"] = 1628620455139868700 - pv["s_YZvBpEaztom9z5V/hDoeIw"] = 1628620455135215600 - - ctx := base.TestCtx(t) - - doc_meta := []byte(doc_meta_with_vv) - doc, err := unmarshalDocumentWithXattr(ctx, "doc_1k", nil, doc_meta, nil, 1, DocUnmarshalVV) - require.NoError(t, err) - - // assert on doc version vector values - assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) - assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) - assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) - assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) - assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) - - doc, err = unmarshalDocumentWithXattr(ctx, "doc1", nil, doc_meta, nil, 1, DocUnmarshalAll) - require.NoError(t, err) - - // 
assert on doc version vector values - assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) - assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) - assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) - assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) - assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) - - doc, err = unmarshalDocumentWithXattr(ctx, "doc1", nil, doc_meta, nil, 1, DocUnmarshalNoHistory) - require.NoError(t, err) - - // assert on doc version vector values - assert.Equal(t, uint64(123456), doc.SyncData.HLV.CurrentVersionCAS) - assert.Equal(t, uint64(123456), doc.SyncData.HLV.Version) - assert.Equal(t, "cb06dc003846116d9b66d2ab23887a96", doc.SyncData.HLV.SourceID) - assert.True(t, reflect.DeepEqual(mv, doc.SyncData.HLV.MergeVersions)) - assert.True(t, reflect.DeepEqual(pv, doc.SyncData.HLV.PreviousVersions)) -} - func TestParseXattr(t *testing.T) { zeroByte := byte(0) // Build payload for single xattr pair and body diff --git a/db/hybrid_logical_vector.go b/db/hybrid_logical_vector.go index 433e4bbd2c..686ed33575 100644 --- a/db/hybrid_logical_vector.go +++ b/db/hybrid_logical_vector.go @@ -10,15 +10,10 @@ package db import ( "fmt" - "math" - sgbucket "github.com/couchbase/sg-bucket" "github.com/couchbase/sync_gateway/base" ) -// hlvExpandMacroCASValue causes the field to be populated by CAS value by macro expansion -const hlvExpandMacroCASValue = math.MaxUint64 - type HybridLogicalVector struct { CurrentVersionCAS uint64 // current version cas (or cvCAS) stores the current CAS at the time of replication SourceID string // source bucket uuid of where this entry originated from @@ -41,6 +36,10 @@ type PersistedHybridLogicalVector struct { PreviousVersions map[string]string `json:"pv,omitempty"` } +type PersistedVersionVector struct { + PersistedHybridLogicalVector `json:"_vv"` +} + // NewHybridLogicalVector returns a HybridLogicalVector struct with maps initialised 
in the struct func NewHybridLogicalVector() HybridLogicalVector { return HybridLogicalVector{ @@ -68,13 +67,7 @@ func (hlv *HybridLogicalVector) IsInConflict(otherVector HybridLogicalVector) bo // previous versions on the HLV if needed func (hlv *HybridLogicalVector) AddVersion(newVersion CurrentVersionVector) error { if newVersion.VersionCAS < hlv.Version { - return fmt.Errorf("attempting to add new verison vector entry with a CAS that is less than the current version CAS value. Current cas: %d new cas %d", hlv.Version, newVersion.VersionCAS) - } - // check if this is the first time we're adding a source - version pair - if hlv.SourceID == "" { - hlv.Version = newVersion.VersionCAS - hlv.SourceID = newVersion.SourceID - return nil + return fmt.Errorf("attempting to add new verison vector entry with a CAS that is less than the current version CAS value") } // if new entry has the same source we simple just update the version if newVersion.SourceID == hlv.SourceID { @@ -82,9 +75,6 @@ func (hlv *HybridLogicalVector) AddVersion(newVersion CurrentVersionVector) erro return nil } // if we get here this is a new version from a different sourceID thus need to move current sourceID to previous versions and update current version - if hlv.PreviousVersions == nil { - hlv.PreviousVersions = make(map[string]uint64) - } hlv.PreviousVersions[hlv.SourceID] = hlv.Version hlv.Version = newVersion.VersionCAS hlv.SourceID = newVersion.SourceID @@ -180,7 +170,7 @@ func (hlv *HybridLogicalVector) GetVersion(sourceID string) uint64 { return latestVersion } -func (hlv HybridLogicalVector) MarshalJSON() ([]byte, error) { +func (hlv *HybridLogicalVector) MarshalJSON() ([]byte, error) { persistedHLV, err := hlv.convertHLVToPersistedFormat() if err != nil { @@ -191,7 +181,7 @@ func (hlv HybridLogicalVector) MarshalJSON() ([]byte, error) { } func (hlv *HybridLogicalVector) UnmarshalJSON(inputjson []byte) error { - persistedJSON := PersistedHybridLogicalVector{} + persistedJSON := 
PersistedVersionVector{} err := base.JSONUnmarshal(inputjson, &persistedJSON) if err != nil { return err @@ -201,16 +191,13 @@ func (hlv *HybridLogicalVector) UnmarshalJSON(inputjson []byte) error { return nil } -func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedHybridLogicalVector, error) { - persistedHLV := PersistedHybridLogicalVector{} +func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedVersionVector, error) { + persistedHLV := PersistedVersionVector{} var cvCasByteArray []byte - var vrsCasByteArray []byte if hlv.CurrentVersionCAS != 0 { cvCasByteArray = base.Uint64CASToLittleEndianHex(hlv.CurrentVersionCAS) } - if hlv.Version != 0 { - vrsCasByteArray = base.Uint64CASToLittleEndianHex(hlv.Version) - } + vrsCasByteArray := base.Uint64CASToLittleEndianHex(hlv.Version) pvPersistedFormat, err := convertMapToPersistedFormat(hlv.PreviousVersions) if err != nil { @@ -229,7 +216,7 @@ func (hlv *HybridLogicalVector) convertHLVToPersistedFormat() (*PersistedHybridL return &persistedHLV, nil } -func (hlv *HybridLogicalVector) convertPersistedHLVToInMemoryHLV(persistedJSON PersistedHybridLogicalVector) { +func (hlv *HybridLogicalVector) convertPersistedHLVToInMemoryHLV(persistedJSON PersistedVersionVector) { hlv.CurrentVersionCAS = base.HexCasToUint64(persistedJSON.CurrentVersionCAS) hlv.SourceID = persistedJSON.SourceID // convert the hex cas to uint64 cas @@ -269,17 +256,3 @@ func convertMapToInMemoryFormat(persistedMap map[string]string) map[string]uint6 } return returnedMap } - -// computeMacroExpansions returns the mutate in spec needed for the document update based off the outcome in updateHLV -func (hlv *HybridLogicalVector) computeMacroExpansions() []sgbucket.MacroExpansionSpec { - var outputSpec []sgbucket.MacroExpansionSpec - if hlv.Version == hlvExpandMacroCASValue { - spec := sgbucket.NewMacroExpansionSpec(xattrCurrentVersionPath(base.SyncXattrName), sgbucket.MacroCas) - outputSpec = append(outputSpec, spec) - } - if 
hlv.CurrentVersionCAS == hlvExpandMacroCASValue { - spec := sgbucket.NewMacroExpansionSpec(xattrCurrentVersionCASPath(base.SyncXattrName), sgbucket.MacroCas) - outputSpec = append(outputSpec, spec) - } - return outputSpec -} diff --git a/db/import.go b/db/import.go index 6db16664b9..b578ab1fae 100644 --- a/db/import.go +++ b/db/import.go @@ -139,8 +139,7 @@ func (db *DatabaseCollectionWithUser) importDoc(ctx context.Context, docid strin existingDoc.Expiry = *expiry } - docUpdateEvent := Import - docOut, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, true, existingDoc.Expiry, mutationOptions, docUpdateEvent, existingDoc, func(doc *Document) (resultDocument *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { + docOut, _, err = db.updateAndReturnDoc(ctx, newDoc.ID, true, existingDoc.Expiry, mutationOptions, existingDoc, func(doc *Document) (resultDocument *Document, resultAttachmentData AttachmentData, createNewRevIDSkipped bool, updatedExpiry *uint32, resultErr error) { // Perform cas mismatch check first, as we want to identify cas mismatch before triggering migrate handling. // If there's a cas mismatch, the doc has been updated since the version that triggered the import. Handling depends on import mode. 
if doc.Cas != existingDoc.Cas { diff --git a/db/query_test.go b/db/query_test.go index 81d262c96f..2ef43c0f82 100644 --- a/db/query_test.go +++ b/db/query_test.go @@ -372,7 +372,7 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { // Create 10 added documents for i := 1; i <= 10; i++ { id := "created" + strconv.Itoa(i) - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "1-a", revId) docIdFlagMap[doc.ID] = uint8(0x0) @@ -385,12 +385,12 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { // Create 10 deleted documents for i := 1; i <= 10; i++ { id := "deleted" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "1-a", revId) body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "2-a", revId, "Couldn't create tombstone revision") @@ -402,22 +402,22 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) 
body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-a", revId, "Couldn't create tombstone revision") @@ -429,27 +429,27 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched|deleted" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, 
false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) body[BodyDeleted] = true - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"3-a", "2-a"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-a", revId, "Couldn't create tombstone revision") body[BodyDeleted] = true - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-b", "2-b"}, false, ExistingVersionWithUpdateToHLV) + doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"3-b", "2-b"}, false) require.NoError(t, err, "Couldn't create document") require.Equal(t, "3-b", revId, "Couldn't create tombstone revision") @@ -461,17 +461,17 @@ func TestQueryChannelsActiveOnlyWithLimit(t *testing.T) { for i := 1; i <= 10; i++ { body["sound"] = "meow" id := "branched|conflict" + strconv.Itoa(i) - _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"1-a"}, false) require.NoError(t, err, "Couldn't create document revision 1-a") require.Equal(t, "1-a", revId) body["sound"] = "bark" - _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false, ExistingVersionWithUpdateToHLV) + _, revId, err = collection.PutExistingRevWithBody(ctx, id, body, []string{"2-b", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-b") require.Equal(t, "2-b", revId) body["sound"] = "bleat" - doc, revId, err := collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false, ExistingVersionWithUpdateToHLV) + doc, revId, err := 
collection.PutExistingRevWithBody(ctx, id, body, []string{"2-a", "1-a"}, false) require.NoError(t, err, "Couldn't create revision 2-a") require.Equal(t, "2-a", revId) diff --git a/db/revision_cache_bypass.go b/db/revision_cache_bypass.go index 1b05788870..049faeb937 100644 --- a/db/revision_cache_bypass.go +++ b/db/revision_cache_bypass.go @@ -30,8 +30,8 @@ func NewBypassRevisionCache(backingStore RevisionCacheBackingStore, bypassStat * } } -// GetWithRev fetches the revision for the given docID and revID immediately from the bucket. -func (rc *BypassRevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { +// Get fetches the revision for the given docID and revID immediately from the bucket. +func (rc *BypassRevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error) { unmarshalLevel := DocUnmarshalSync if includeBody { @@ -45,33 +45,7 @@ func (rc *BypassRevisionCache) GetWithRev(ctx context.Context, docID, revID stri docRev = DocumentRevision{ RevID: revID, } - docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.CV, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, revID) - if err != nil { - return DocumentRevision{}, err - } - - rc.bypassStat.Add(1) - - return docRev, nil -} - -// GetWithCV fetches the Current Version for the given docID and CV immediately from the bucket. 
-func (rc *BypassRevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { - - unmarshalLevel := DocUnmarshalSync - if includeBody { - unmarshalLevel = DocUnmarshalAll - } - docRev = DocumentRevision{ - CV: cv, - } - - doc, err := rc.backingStore.GetDocument(ctx, docID, unmarshalLevel) - if err != nil { - return DocumentRevision{}, err - } - - docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.RevID, err = revCacheLoaderForDocumentCV(ctx, rc.backingStore, doc, *cv) + docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, revID) if err != nil { return DocumentRevision{}, err } @@ -97,7 +71,7 @@ func (rc *BypassRevisionCache) GetActive(ctx context.Context, docID string, incl RevID: doc.CurrentRev, } - docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, docRev.CV, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, doc.SyncData.CurrentRev) + docRev.BodyBytes, docRev._shallowCopyBody, docRev.History, docRev.Channels, docRev.Removed, docRev.Attachments, docRev.Deleted, docRev.Expiry, err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, doc.SyncData.CurrentRev) if err != nil { return DocumentRevision{}, err } @@ -122,11 +96,7 @@ func (rc *BypassRevisionCache) Upsert(ctx context.Context, docRev DocumentRevisi // no-op } -func (rc *BypassRevisionCache) RemoveWithRev(docID, revID string) { - // nop -} - -func (rc *BypassRevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { +func (rc *BypassRevisionCache) Remove(docID, revID string) { // nop } diff --git a/db/revision_cache_interface.go b/db/revision_cache_interface.go 
index e50ba72f98..cd8ba32b39 100644 --- a/db/revision_cache_interface.go +++ b/db/revision_cache_interface.go @@ -28,15 +28,10 @@ const ( // RevisionCache is an interface that can be used to fetch a DocumentRevision for a Doc ID and Rev ID pair. type RevisionCache interface { - // GetWithRev returns the given revision, and stores if not already cached. + // Get returns the given revision, and stores if not already cached. // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. // When includeDelta=true, the returned DocumentRevision will include delta - requires additional locking during retrieval. - GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (DocumentRevision, error) - - // GetWithCV returns the given revision by CV, and stores if not already cached. - // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. - // When includeDelta=true, the returned DocumentRevision will include delta - requires additional locking during retrieval. - GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (DocumentRevision, error) + Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error) // GetActive returns the current revision for the given doc ID, and stores if not already cached. // When includeBody=true, the returned DocumentRevision will include a mutable shallow copy of the marshaled body. @@ -51,11 +46,8 @@ type RevisionCache interface { // Update will remove existing value and re-create new one Upsert(ctx context.Context, docRev DocumentRevision) - // RemoveWithRev evicts a revision from the cache using its revID. - RemoveWithRev(docID, revID string) - - // RemoveWithCV evicts a revision from the cache using its current version. 
- RemoveWithCV(docID string, cv *CurrentVersionVector) + // Remove eliminates a revision in the cache. + Remove(docID, revID string) // UpdateDelta stores the given toDelta value in the given rev if cached UpdateDelta(ctx context.Context, docID, revID string, toDelta RevisionDelta) @@ -112,7 +104,6 @@ func DefaultRevisionCacheOptions() *RevisionCacheOptions { type RevisionCacheBackingStore interface { GetDocument(ctx context.Context, docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error) getRevision(ctx context.Context, doc *Document, revid string) ([]byte, Body, AttachmentsMeta, error) - getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) } // DocumentRevision stored and returned by the rev cache @@ -128,7 +119,6 @@ type DocumentRevision struct { Delta *RevisionDelta Deleted bool Removed bool // True if the revision is a removal. - CV *CurrentVersionVector _shallowCopyBody Body // an unmarshalled body that can produce shallow copies } @@ -233,12 +223,6 @@ type IDAndRev struct { RevID string } -type IDandCV struct { - DocID string - Version uint64 - Source string -} - // RevisionDelta stores data about a delta between a revision and ToRevID. type RevisionDelta struct { ToRevID string // Target revID for the delta @@ -262,104 +246,44 @@ func newRevCacheDelta(deltaBytes []byte, fromRevID string, toRevision DocumentRe // This is the RevisionCacheLoaderFunc callback for the context's RevisionCache. // Its job is to load a revision from the bucket when there's a cache miss. 
-func revCacheLoader(ctx context.Context, backingStore RevisionCacheBackingStore, id IDAndRev, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, fetchedCV *CurrentVersionVector, err error) { +func revCacheLoader(ctx context.Context, backingStore RevisionCacheBackingStore, id IDAndRev, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, err error) { var doc *Document unmarshalLevel := DocUnmarshalSync if unmarshalBody { unmarshalLevel = DocUnmarshalAll } if doc, err = backingStore.GetDocument(ctx, id.DocID, unmarshalLevel); doc == nil { - return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, fetchedCV, err + return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, err } return revCacheLoaderForDocument(ctx, backingStore, doc, id.RevID) } -// revCacheLoaderForCv will load a document from the bucket using the CV, comapre the fetched doc and the CV specified in the function, -// and will still return revid for purpose of populating the Rev ID lookup map on the cache -func revCacheLoaderForCv(ctx context.Context, backingStore RevisionCacheBackingStore, id IDandCV, unmarshalBody bool) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, revid string, err error) { - cv := CurrentVersionVector{ - VersionCAS: id.Version, - SourceID: id.Source, - } - var doc *Document - unmarshalLevel := DocUnmarshalSync - if unmarshalBody { - unmarshalLevel = DocUnmarshalAll - } - if doc, err = backingStore.GetDocument(ctx, id.DocID, unmarshalLevel); doc == nil { - return bodyBytes, body, history, channels, removed, attachments, deleted, expiry, revid, err - } - - return revCacheLoaderForDocumentCV(ctx, backingStore, doc, cv) 
-} - // Common revCacheLoader functionality used either during a cache miss (from revCacheLoader), or directly when retrieving current rev from cache -func revCacheLoaderForDocument(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, revid string) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, fetchedCV *CurrentVersionVector, err error) { +func revCacheLoaderForDocument(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, revid string) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, err error) { if bodyBytes, body, attachments, err = backingStore.getRevision(ctx, doc, revid); err != nil { // If we can't find the revision (either as active or conflicted body from the document, or as old revision body backup), check whether // the revision was a channel removal. 
If so, we want to store as removal in the revision cache removalBodyBytes, removalHistory, activeChannels, isRemoval, isDelete, isRemovalErr := doc.IsChannelRemoval(ctx, revid) if isRemovalErr != nil { - return bodyBytes, body, history, channels, isRemoval, nil, isDelete, nil, fetchedCV, isRemovalErr + return bodyBytes, body, history, channels, isRemoval, nil, isDelete, nil, isRemovalErr } if isRemoval { - return removalBodyBytes, body, removalHistory, activeChannels, isRemoval, nil, isDelete, nil, fetchedCV, nil + return removalBodyBytes, body, removalHistory, activeChannels, isRemoval, nil, isDelete, nil, nil } else { // If this wasn't a removal, return the original error from getRevision - return bodyBytes, body, history, channels, removed, nil, isDelete, nil, fetchedCV, err + return bodyBytes, body, history, channels, removed, nil, isDelete, nil, err } } deleted = doc.History[revid].Deleted validatedHistory, getHistoryErr := doc.History.getHistory(revid) if getHistoryErr != nil { - return bodyBytes, body, history, channels, removed, nil, deleted, nil, fetchedCV, getHistoryErr + return bodyBytes, body, history, channels, removed, nil, deleted, nil, getHistoryErr } history = encodeRevisions(ctx, doc.ID, validatedHistory) channels = doc.History[revid].Channels - if doc.HLV != nil { - fetchedCV = &CurrentVersionVector{SourceID: doc.HLV.SourceID, VersionCAS: doc.HLV.Version} - } - - return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, fetchedCV, err -} - -// revCacheLoaderForDocumentCV used either during cache miss (from revCacheLoaderForCv), or used directly when getting current active CV from cache -func revCacheLoaderForDocumentCV(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, cv CurrentVersionVector) (bodyBytes []byte, body Body, history Revisions, channels base.Set, removed bool, attachments AttachmentsMeta, deleted bool, expiry *time.Time, revid string, err error) { - if bodyBytes, body, 
attachments, err = backingStore.getCurrentVersion(ctx, doc); err != nil { - // we need implementation of IsChannelRemoval for CV here. - // pending CBG-3213 support of channel removal for CV - } - if err = doc.HasCurrentVersion(cv); err != nil { - return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, revid, err - } - channels = doc.currentRevChannels - revid = doc.CurrentRev - - return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, revid, err -} - -func (c *DatabaseCollection) getCurrentVersion(ctx context.Context, doc *Document) (bodyBytes []byte, body Body, attachments AttachmentsMeta, err error) { - bodyBytes, err = doc.BodyBytes(ctx) - if err != nil { - base.WarnfCtx(ctx, "Marshal error when retrieving active current version body: %v", err) - return nil, nil, nil, err - } - - body = doc._body - attachments = doc.Attachments - - // handle backup revision inline attachments, or pre-2.5 meta - if inlineAtts, cleanBodyBytes, cleanBody, err := extractInlineAttachments(bodyBytes); err != nil { - return nil, nil, nil, err - } else if len(inlineAtts) > 0 { - // we found some inline attachments, so merge them with attachments, and update the bodies - attachments = mergeAttachments(inlineAtts, attachments) - bodyBytes = cleanBodyBytes - body = cleanBody - } - return bodyBytes, body, attachments, err + return bodyBytes, body, history, channels, removed, attachments, deleted, doc.Expiry, err } diff --git a/db/revision_cache_lru.go b/db/revision_cache_lru.go index 32d78d7613..575c7c6811 100644 --- a/db/revision_cache_lru.go +++ b/db/revision_cache_lru.go @@ -45,12 +45,8 @@ func (sc *ShardedLRURevisionCache) getShard(docID string) *LRURevisionCache { return sc.caches[sgbucket.VBHash(docID, sc.numShards)] } -func (sc *ShardedLRURevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { - return sc.getShard(docID).GetWithRev(ctx, 
docID, revID, includeBody, includeDelta) -} - -func (sc *ShardedLRURevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (docRev DocumentRevision, err error) { - return sc.getShard(docID).GetWithCV(ctx, docID, cv, includeBody, includeDelta) +func (sc *ShardedLRURevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (docRev DocumentRevision, err error) { + return sc.getShard(docID).Get(ctx, docID, revID, includeBody, includeDelta) } func (sc *ShardedLRURevisionCache) Peek(ctx context.Context, docID, revID string) (docRev DocumentRevision, found bool) { @@ -73,19 +69,14 @@ func (sc *ShardedLRURevisionCache) Upsert(ctx context.Context, docRev DocumentRe sc.getShard(docRev.DocID).Upsert(ctx, docRev) } -func (sc *ShardedLRURevisionCache) RemoveWithRev(docID, revID string) { - sc.getShard(docID).RemoveWithRev(docID, revID) -} - -func (sc *ShardedLRURevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { - sc.getShard(docID).RemoveWithCV(docID, cv) +func (sc *ShardedLRURevisionCache) Remove(docID, revID string) { + sc.getShard(docID).Remove(docID, revID) } // An LRU cache of document revision bodies, together with their channel access. 
type LRURevisionCache struct { backingStore RevisionCacheBackingStore cache map[IDAndRev]*list.Element - hlvCache map[IDandCV]*list.Element lruList *list.List cacheHits *base.SgwIntStat cacheMisses *base.SgwIntStat @@ -102,9 +93,7 @@ type revCacheValue struct { attachments AttachmentsMeta delta *RevisionDelta body Body - id string - cv CurrentVersionVector - revID string + key IDAndRev bodyBytes []byte lock sync.RWMutex deleted bool @@ -116,7 +105,6 @@ func NewLRURevisionCache(capacity uint32, backingStore RevisionCacheBackingStore return &LRURevisionCache{ cache: map[IDAndRev]*list.Element{}, - hlvCache: map[IDandCV]*list.Element{}, lruList: list.New(), capacity: capacity, backingStore: backingStore, @@ -129,18 +117,14 @@ func NewLRURevisionCache(capacity uint32, backingStore RevisionCacheBackingStore // Returns the body of the revision, its history, and the set of channels it's in. // If the cache has a loaderFunction, it will be called if the revision isn't in the cache; // any error returned by the loaderFunction will be returned from Get. -func (rc *LRURevisionCache) GetWithRev(ctx context.Context, docID, revID string, includeBody, includeDelta bool) (DocumentRevision, error) { - return rc.getFromCacheByRev(ctx, docID, revID, true, includeBody, includeDelta) -} - -func (rc *LRURevisionCache) GetWithCV(ctx context.Context, docID string, cv *CurrentVersionVector, includeBody, includeDelta bool) (DocumentRevision, error) { - return rc.getFromCacheByCV(ctx, docID, cv, true, includeBody, includeDelta) +func (rc *LRURevisionCache) Get(ctx context.Context, docID, revID string, includeBody bool, includeDelta bool) (DocumentRevision, error) { + return rc.getFromCache(ctx, docID, revID, true, includeBody, includeDelta) } // Looks up a revision from the cache only. Will not fall back to loader function if not // present in the cache. 
func (rc *LRURevisionCache) Peek(ctx context.Context, docID, revID string) (docRev DocumentRevision, found bool) { - docRev, err := rc.getFromCacheByRev(ctx, docID, revID, false, RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := rc.getFromCache(ctx, docID, revID, false, RevCacheOmitBody, RevCacheOmitDelta) if err != nil { return DocumentRevision{}, false } @@ -156,42 +140,18 @@ func (rc *LRURevisionCache) UpdateDelta(ctx context.Context, docID, revID string } } -func (rc *LRURevisionCache) getFromCacheByRev(ctx context.Context, docID, revID string, loadOnCacheMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { +func (rc *LRURevisionCache) getFromCache(ctx context.Context, docID, revID string, loadOnCacheMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { value := rc.getValue(docID, revID, loadOnCacheMiss) if value == nil { return DocumentRevision{}, nil } - docRev, cacheHit, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) - rc.statsRecorderFunc(cacheHit) - - if err != nil { - rc.removeValue(value) // don't keep failed loads in the cache - } - if !cacheHit { - rc.addToHLVMapPostLoad(docID, docRev.RevID, docRev.CV) - } - - return docRev, err -} - -func (rc *LRURevisionCache) getFromCacheByCV(ctx context.Context, docID string, cv *CurrentVersionVector, loadCacheOnMiss bool, includeBody bool, includeDelta bool) (DocumentRevision, error) { - value := rc.getValueByCV(docID, cv, loadCacheOnMiss) - if value == nil { - return DocumentRevision{}, nil - } - - docRev, cacheHit, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) - rc.statsRecorderFunc(cacheHit) + docRev, statEvent, err := value.load(ctx, rc.backingStore, includeBody, includeDelta) + rc.statsRecorderFunc(statEvent) if err != nil { rc.removeValue(value) // don't keep failed loads in the cache } - - if !cacheHit { - rc.addToRevMapPostLoad(docID, docRev.RevID, docRev.CV) - } - return docRev, err } @@ -202,16 +162,15 @@ func (rc 
*LRURevisionCache) LoadInvalidRevFromBackingStore(ctx context.Context, var docRevBody Body value := revCacheValue{ - id: key.DocID, - revID: key.RevID, + key: key, } // If doc has been passed in use this to grab values. Otherwise run revCacheLoader which will grab the Document // first if doc != nil { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, _, value.err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, key.RevID) + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoaderForDocument(ctx, rc.backingStore, doc, key.RevID) } else { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, _, value.err = revCacheLoader(ctx, rc.backingStore, key, includeBody) + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoader(ctx, rc.backingStore, key, includeBody) } if includeDelta { @@ -251,15 +210,12 @@ func (rc *LRURevisionCache) GetActive(ctx context.Context, docID string, include // Retrieve from or add to rev cache value := rc.getValue(docID, bucketDoc.CurrentRev, true) - docRev, cacheHit, err := value.loadForDoc(ctx, rc.backingStore, bucketDoc, includeBody) - rc.statsRecorderFunc(cacheHit) + docRev, statEvent, err := value.loadForDoc(ctx, rc.backingStore, bucketDoc, includeBody) + rc.statsRecorderFunc(statEvent) if err != nil { rc.removeValue(value) // don't keep failed loads in the cache } - // add successfully fetched value to cv lookup map too - rc.addToHLVMapPostLoad(docID, docRev.RevID, docRev.CV) - return docRev, err } @@ -278,43 +234,30 @@ func (rc *LRURevisionCache) Put(ctx context.Context, docRev DocumentRevision) { // TODO: CBG-1948 panic("Missing history for RevisionCache.Put") } - // doc should always have a cv present in a 
PUT operation on the cache (update HLV is called before hand in doc update process) - // thus we can call getValueByCV directly the update the rev lookup post this - value := rc.getValueByCV(docRev.DocID, docRev.CV, true) - // store the created value + value := rc.getValue(docRev.DocID, docRev.RevID, true) value.store(docRev) - - // add new doc version to the rev id lookup map - rc.addToRevMapPostLoad(docRev.DocID, docRev.RevID, docRev.CV) } // Upsert a revision in the cache. func (rc *LRURevisionCache) Upsert(ctx context.Context, docRev DocumentRevision) { - var value *revCacheValue - // similar to PUT operation we should have the CV defined by this point (updateHLV is called before calling this) - key := IDandCV{DocID: docRev.DocID, Source: docRev.CV.SourceID, Version: docRev.CV.VersionCAS} - legacyKey := IDAndRev{DocID: docRev.DocID, RevID: docRev.RevID} + key := IDAndRev{DocID: docRev.DocID, RevID: docRev.RevID} rc.lock.Lock() - // lookup for element in hlv lookup map, if not found for some reason try rev lookup map - if elem := rc.hlvCache[key]; elem != nil { - rc.lruList.Remove(elem) - } else if elem = rc.cache[legacyKey]; elem != nil { + // If element exists remove from lrulist + if elem := rc.cache[key]; elem != nil { rc.lruList.Remove(elem) } // Add new value and overwrite existing cache key, pushing to front to maintain order - // also ensure we add to rev id lookup map too - value = &revCacheValue{id: docRev.DocID, cv: *docRev.CV} - elem := rc.lruList.PushFront(value) - rc.hlvCache[key] = elem - rc.cache[legacyKey] = elem + value := &revCacheValue{key: key} + rc.cache[key] = rc.lruList.PushFront(value) - for rc.lruList.Len() > int(rc.capacity) { + // Purge oldest item if required + for len(rc.cache) > int(rc.capacity) { rc.purgeOldest_() } rc.lock.Unlock() - // store upsert value + value.store(docRev) } @@ -329,32 +272,9 @@ func (rc *LRURevisionCache) getValue(docID, revID string, create bool) (value *r rc.lruList.MoveToFront(elem) value = 
elem.Value.(*revCacheValue) } else if create { - value = &revCacheValue{id: docID, revID: revID} + value = &revCacheValue{key: key} rc.cache[key] = rc.lruList.PushFront(value) - for rc.lruList.Len() > int(rc.capacity) { - rc.purgeOldest_() - } - } - rc.lock.Unlock() - return -} - -// getValueByCV gets a value from rev cache by CV, if not found and create is true, will add the value to cache and both lookup maps -func (rc *LRURevisionCache) getValueByCV(docID string, cv *CurrentVersionVector, create bool) (value *revCacheValue) { - if docID == "" || cv == nil { - return nil - } - - key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} - rc.lock.Lock() - if elem := rc.hlvCache[key]; elem != nil { - rc.lruList.MoveToFront(elem) - value = elem.Value.(*revCacheValue) - } else if create { - value = &revCacheValue{id: docID, cv: *cv} - newElem := rc.lruList.PushFront(value) - rc.hlvCache[key] = newElem - for rc.lruList.Len() > int(rc.capacity) { + for len(rc.cache) > int(rc.capacity) { rc.purgeOldest_() } } @@ -362,93 +282,8 @@ func (rc *LRURevisionCache) getValueByCV(docID string, cv *CurrentVersionVector, return } -// addToRevMapPostLoad will generate and entry in the Rev lookup map for a new document entering the cache -func (rc *LRURevisionCache) addToRevMapPostLoad(docID, revID string, cv *CurrentVersionVector) { - legacyKey := IDAndRev{DocID: docID, RevID: revID} - key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} - - rc.lock.Lock() - defer rc.lock.Unlock() - // check for existing value in rev cache map (due to concurrent fetch by rev ID) - cvElem, cvFound := rc.hlvCache[key] - revElem, revFound := rc.cache[legacyKey] - if !cvFound { - // its possible the element has been evicted if we don't find the element above (high churn on rev cache) - // need to return doc revision to caller still but no need repopulate the cache - return - } - // Check if another goroutine has already updated the rev map - if revFound { - if cvElem 
== revElem { - // already match, return - return - } - // if CV map and rev map are targeting different list elements, update to have both use the cv map element - rc.cache[legacyKey] = cvElem - rc.lruList.Remove(revElem) - } else { - // if not found we need to add the element to the rev lookup (for PUT code path) - rc.cache[legacyKey] = cvElem - } -} - -// addToHLVMapPostLoad will generate and entry in the CV lookup map for a new document entering the cache -func (rc *LRURevisionCache) addToHLVMapPostLoad(docID, revID string, cv *CurrentVersionVector) { - legacyKey := IDAndRev{DocID: docID, RevID: revID} - key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} - - rc.lock.Lock() - defer rc.lock.Unlock() - // check for existing value in rev cache map (due to concurrent fetch by rev ID) - cvElem, cvFound := rc.hlvCache[key] - revElem, revFound := rc.cache[legacyKey] - if !revFound { - // its possible the element has been evicted if we don't find the element above (high churn on rev cache) - // need to return doc revision to caller still but no need repopulate the cache - return - } - // Check if another goroutine has already updated the cv map - if cvFound { - if cvElem == revElem { - // already match, return - return - } - // if CV map and rev map are targeting different list elements, update to have both use the cv map element - rc.cache[legacyKey] = cvElem - rc.lruList.Remove(revElem) - } -} - // Remove removes a value from the revision cache, if present. 
-func (rc *LRURevisionCache) RemoveWithRev(docID, revID string) { - rc.removeFromCacheByRev(docID, revID) -} - -// RemoveWithCV removes a value from rev cache by CV reference if present -func (rc *LRURevisionCache) RemoveWithCV(docID string, cv *CurrentVersionVector) { - rc.removeFromCacheByCV(docID, cv) -} - -// removeFromCacheByCV removes an entry from rev cache by CV -func (rc *LRURevisionCache) removeFromCacheByCV(docID string, cv *CurrentVersionVector) { - key := IDandCV{DocID: docID, Source: cv.SourceID, Version: cv.VersionCAS} - rc.lock.Lock() - defer rc.lock.Unlock() - element, ok := rc.hlvCache[key] - if !ok { - return - } - // grab the revid key from the value to enable us to remove the reference from the rev lookup map too - elem := element.Value.(*revCacheValue) - legacyKey := IDAndRev{DocID: docID, RevID: elem.revID} - rc.lruList.Remove(element) - delete(rc.hlvCache, key) - // remove from rev lookup map too - delete(rc.cache, legacyKey) -} - -// removeFromCacheByRev removes an entry from rev cache by revID -func (rc *LRURevisionCache) removeFromCacheByRev(docID, revID string) { +func (rc *LRURevisionCache) Remove(docID, revID string) { key := IDAndRev{DocID: docID, RevID: revID} rc.lock.Lock() defer rc.lock.Unlock() @@ -456,38 +291,23 @@ func (rc *LRURevisionCache) removeFromCacheByRev(docID, revID string) { if !ok { return } - // grab the cv key key from the value to enable us to remove the reference from the rev lookup map too - elem := element.Value.(*revCacheValue) - hlvKey := IDandCV{DocID: docID, Source: elem.cv.SourceID, Version: elem.cv.VersionCAS} rc.lruList.Remove(element) delete(rc.cache, key) - // remove from CV lookup map too - delete(rc.hlvCache, hlvKey) } // removeValue removes a value from the revision cache, if present and the value matches the the value. If there's an item in the revision cache with a matching docID and revID but the document is different, this item will not be removed from the rev cache. 
func (rc *LRURevisionCache) removeValue(value *revCacheValue) { rc.lock.Lock() - defer rc.lock.Unlock() - revKey := IDAndRev{DocID: value.id, RevID: value.revID} - if element := rc.cache[revKey]; element != nil && element.Value == value { + if element := rc.cache[value.key]; element != nil && element.Value == value { rc.lruList.Remove(element) - delete(rc.cache, revKey) - } - // need to also check hlv lookup cache map - hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} - if element := rc.hlvCache[hlvKey]; element != nil && element.Value == value { - rc.lruList.Remove(element) - delete(rc.hlvCache, hlvKey) + delete(rc.cache, value.key) } + rc.lock.Unlock() } func (rc *LRURevisionCache) purgeOldest_() { value := rc.lruList.Remove(rc.lruList.Back()).(*revCacheValue) - revKey := IDAndRev{DocID: value.id, RevID: value.revID} - hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} - delete(rc.cache, revKey) - delete(rc.hlvCache, hlvKey) + delete(rc.cache, value.key) } // Gets the body etc. out of a revCacheValue. If they aren't present already, the loader func @@ -499,8 +319,6 @@ func (value *revCacheValue) load(ctx context.Context, backingStore RevisionCache // to reduce locking when includeDelta=false var delta *RevisionDelta var docRevBody Body - var fetchedCV *CurrentVersionVector - var revid string // Attempt to read cached value. 
value.lock.RLock() @@ -531,24 +349,12 @@ func (value *revCacheValue) load(ctx context.Context, backingStore RevisionCache // If body is requested and not already present in cache, populate value.body from value.BodyBytes if includeBody && value.body == nil && value.err == nil { if err := value.body.Unmarshal(value.bodyBytes); err != nil { - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.id), value.revID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.key.DocID), value.key.RevID) } } } else { cacheHit = false - if value.revID == "" { - hlvKey := IDandCV{DocID: value.id, Source: value.cv.SourceID, Version: value.cv.VersionCAS} - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, revid, value.err = revCacheLoaderForCv(ctx, backingStore, hlvKey, includeBody) - // based off the current value load we need to populate the revid key with what has been fetched from the bucket (for use of populating the opposite lookup map) - value.revID = revid - } else { - revKey := IDAndRev{DocID: value.id, RevID: value.revID} - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, fetchedCV, value.err = revCacheLoader(ctx, backingStore, revKey, includeBody) - // based off the revision load we need to populate the hlv key with what has been fetched from the bucket (for use of populating the opposite lookup map) - if fetchedCV != nil { - value.cv = *fetchedCV - } - } + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoader(ctx, backingStore, value.key, includeBody) } if includeDelta { @@ -568,7 +374,7 @@ func (value *revCacheValue) updateBody(ctx context.Context) (err error) { var body Body if err := body.Unmarshal(value.bodyBytes); err != nil { // On unmarshal error, warn return 
docRev without body - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.id), value.revID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.key.DocID), value.key.RevID) return err } @@ -585,8 +391,8 @@ func (value *revCacheValue) updateBody(ctx context.Context) (err error) { func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) (DocumentRevision, error) { docRev := DocumentRevision{ - DocID: value.id, - RevID: value.revID, + DocID: value.key.DocID, + RevID: value.key.RevID, BodyBytes: value.bodyBytes, History: value.history, Channels: value.channels, @@ -594,7 +400,6 @@ func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) Attachments: value.attachments.ShallowCopy(), // Avoid caller mutating the stored attachments Deleted: value.deleted, Removed: value.removed, - CV: &CurrentVersionVector{VersionCAS: value.cv.VersionCAS, SourceID: value.cv.SourceID}, } if body != nil { docRev._shallowCopyBody = body.ShallowCopy() @@ -609,8 +414,6 @@ func (value *revCacheValue) asDocumentRevision(body Body, delta *RevisionDelta) func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore RevisionCacheBackingStore, doc *Document, includeBody bool) (docRev DocumentRevision, cacheHit bool, err error) { var docRevBody Body - var fetchedCV *CurrentVersionVector - var revid string value.lock.RLock() if value.bodyBytes != nil || value.err != nil { if includeBody { @@ -640,22 +443,13 @@ func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore Revisio // If body is requested and not already present in cache, attempt to generate from bytes and insert into cache if includeBody && value.body == nil { if err := value.body.Unmarshal(value.bodyBytes); err != nil { - base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", base.UD(value.id), value.revID) + base.WarnfCtx(ctx, "Unable to marshal BodyBytes in revcache for %s %s", 
base.UD(value.key.DocID), value.key.RevID) } } } else { cacheHit = false - if value.revID == "" { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, revid, value.err = revCacheLoaderForDocumentCV(ctx, backingStore, doc, value.cv) - value.revID = revid - } else { - value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, fetchedCV, value.err = revCacheLoaderForDocument(ctx, backingStore, doc, value.revID) - if fetchedCV != nil { - value.cv = *fetchedCV - } - } + value.bodyBytes, value.body, value.history, value.channels, value.removed, value.attachments, value.deleted, value.expiry, value.err = revCacheLoaderForDocument(ctx, backingStore, doc, value.key.RevID) } - if includeBody { docRevBody = value.body } @@ -668,7 +462,7 @@ func (value *revCacheValue) loadForDoc(ctx context.Context, backingStore Revisio func (value *revCacheValue) store(docRev DocumentRevision) { value.lock.Lock() if value.bodyBytes == nil { - value.revID = docRev.RevID + // value already has doc id/rev id in key value.bodyBytes = docRev.BodyBytes value.history = docRev.History value.channels = docRev.Channels diff --git a/db/revision_cache_test.go b/db/revision_cache_test.go index d5abbe6b97..1451d353d9 100644 --- a/db/revision_cache_test.go +++ b/db/revision_cache_test.go @@ -50,13 +50,6 @@ func (t *testBackingStore) GetDocument(ctx context.Context, docid string, unmars Channels: base.SetOf("*"), }, } - doc.currentRevChannels = base.SetOf("*") - - doc.HLV = &HybridLogicalVector{ - SourceID: "test", - Version: 123, - } - return doc, nil } @@ -73,19 +66,6 @@ func (t *testBackingStore) getRevision(ctx context.Context, doc *Document, revid return bodyBytes, b, nil, err } -func (t *testBackingStore) getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) { - t.getRevisionCounter.Add(1) - - b := Body{ - "testing": true, - 
BodyId: doc.ID, - BodyRev: doc.CurrentRev, - "current_version": &CurrentVersionVector{VersionCAS: doc.HLV.Version, SourceID: doc.HLV.SourceID}, - } - bodyBytes, err := base.JSONMarshal(b) - return bodyBytes, b, nil, err -} - type noopBackingStore struct{} func (*noopBackingStore) GetDocument(ctx context.Context, docid string, unmarshalLevel DocumentUnmarshalLevel) (doc *Document, err error) { @@ -96,10 +76,6 @@ func (*noopBackingStore) getRevision(ctx context.Context, doc *Document, revid s return nil, nil, nil, nil } -func (*noopBackingStore) getCurrentVersion(ctx context.Context, doc *Document) ([]byte, Body, AttachmentsMeta, error) { - return nil, nil, nil, nil -} - // Tests the eviction from the LRURevisionCache func TestLRURevisionCacheEviction(t *testing.T) { cacheHitCounter, cacheMissCounter := base.SgwIntStat{}, base.SgwIntStat{} @@ -110,13 +86,13 @@ func TestLRURevisionCacheEviction(t *testing.T) { // Fill up the rev cache with the first 10 docs for docID := 0; docID < 10; docID++ { id := strconv.Itoa(docID) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) + cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", History: Revisions{"start": 1}}) } // Get them back out for i := 0; i < 10; i++ { docID := strconv.Itoa(i) - docRev, err := cache.GetWithRev(ctx, docID, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := cache.Get(ctx, docID, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.NotNil(t, docRev.BodyBytes, "nil body for %s", docID) assert.Equal(t, docID, docRev.DocID) @@ -127,7 +103,7 @@ func TestLRURevisionCacheEviction(t *testing.T) { // Add 3 more docs to the now full revcache for i := 10; i < 13; i++ { docID := strconv.Itoa(i) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: docID, RevID: "1-abc", CV: 
&CurrentVersionVector{VersionCAS: uint64(i), SourceID: "test"}, History: Revisions{"start": 1}}) + cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: docID, RevID: "1-abc", History: Revisions{"start": 1}}) } // Check that the first 3 docs were evicted @@ -144,68 +120,7 @@ func TestLRURevisionCacheEviction(t *testing.T) { // and check we can Get up to and including the last 3 we put in for i := 0; i < 10; i++ { id := strconv.Itoa(i + 3) - docRev, err := cache.GetWithRev(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) - assert.NoError(t, err) - assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) - assert.Equal(t, id, docRev.DocID) - assert.Equal(t, int64(0), cacheMissCounter.Value()) - assert.Equal(t, prevCacheHitCount+int64(i)+1, cacheHitCounter.Value()) - } -} - -// TestLRURevisionCacheEvictionMixedRevAndCV: -// - Add 10 docs to the cache -// - Assert that the cache list and relevant lookup maps have correct lengths -// - Add 3 more docs -// - Assert that lookup maps and the cache list still only have 10 elements in -// - Perform a Get with CV specified on all 10 elements in the cache and assert we get a hit for each element and no misses, -// testing the eviction worked correct -// - Then do the same but for rev lookup -func TestLRURevisionCacheEvictionMixedRevAndCV(t *testing.T) { - cacheHitCounter, cacheMissCounter := base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &noopBackingStore{}, &cacheHitCounter, &cacheMissCounter) - - ctx := base.TestCtx(t) - - // Fill up the rev cache with the first 10 docs - for docID := 0; docID < 10; docID++ { - id := strconv.Itoa(docID) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) - } - - // assert that the list has 10 elements along with both lookup maps - assert.Equal(t, 10, len(cache.hlvCache)) - assert.Equal(t, 10, len(cache.cache)) - 
assert.Equal(t, 10, cache.lruList.Len()) - - // Add 3 more docs to the now full rev cache to trigger eviction - for docID := 10; docID < 13; docID++ { - id := strconv.Itoa(docID) - cache.Put(ctx, DocumentRevision{BodyBytes: []byte(`{}`), DocID: id, RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(docID), SourceID: "test"}, History: Revisions{"start": 1}}) - } - // assert the cache and associated lookup maps only have 10 items in them (i.e.e is eviction working?) - assert.Equal(t, 10, len(cache.hlvCache)) - assert.Equal(t, 10, len(cache.cache)) - assert.Equal(t, 10, cache.lruList.Len()) - - // assert we can get a hit on all 10 elements in the cache by CV lookup - prevCacheHitCount := cacheHitCounter.Value() - for i := 0; i < 10; i++ { - id := strconv.Itoa(i + 3) - cv := CurrentVersionVector{VersionCAS: uint64(i + 3), SourceID: "test"} - docRev, err := cache.GetWithCV(ctx, id, &cv, RevCacheOmitBody, RevCacheOmitDelta) - assert.NoError(t, err) - assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) - assert.Equal(t, id, docRev.DocID) - assert.Equal(t, int64(0), cacheMissCounter.Value()) - assert.Equal(t, prevCacheHitCount+int64(i)+1, cacheHitCounter.Value()) - } - - // now do same but for rev lookup - prevCacheHitCount = cacheHitCounter.Value() - for i := 0; i < 10; i++ { - id := strconv.Itoa(i + 3) - docRev, err := cache.GetWithRev(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err := cache.Get(ctx, id, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.NotNil(t, docRev.BodyBytes, "nil body for %s", id) assert.Equal(t, id, docRev.DocID) @@ -220,7 +135,7 @@ func TestBackingStore(t *testing.T) { cache := NewLRURevisionCache(10, &testBackingStore{[]string{"Peter"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) // Get Rev for the first time - miss cache, but fetch the doc and revision to store - docRev, err := cache.GetWithRev(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, 
RevCacheOmitDelta) + docRev, err := cache.Get(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.Equal(t, "Jens", docRev.DocID) assert.NotNil(t, docRev.History) @@ -231,7 +146,7 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Doc doesn't exist, so miss the cache, and fail when getting the doc - docRev, err = cache.GetWithRev(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.Get(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) assert.Nil(t, docRev.BodyBytes) assert.Equal(t, int64(0), cacheHitCounter.Value()) @@ -240,7 +155,7 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Rev is already resident, but still issue GetDocument to check for later revisions - docRev, err = cache.GetWithRev(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.Get(base.TestCtx(t), "Jens", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) assert.Equal(t, "Jens", docRev.DocID) assert.NotNil(t, docRev.History) @@ -251,60 +166,7 @@ func TestBackingStore(t *testing.T) { assert.Equal(t, int64(1), getRevisionCounter.Value()) // Rev still doesn't exist, make sure it wasn't cached - docRev, err = cache.GetWithRev(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) - assertHTTPError(t, err, 404) - assert.Nil(t, docRev.BodyBytes) - assert.Equal(t, int64(1), cacheHitCounter.Value()) - assert.Equal(t, int64(3), cacheMissCounter.Value()) - assert.Equal(t, int64(3), getDocumentCounter.Value()) - assert.Equal(t, int64(1), getRevisionCounter.Value()) -} - -// TestBackingStoreCV: -// - Perform a Get on a doc by cv that is not currently in the rev cache, assert we get cache miss -// - Perform a Get again on the same doc and assert we get cache hit -// - Perform a Get on doc that doesn't 
exist, so misses cache and will fail on retrieving doc from bucket -// - Try a Get again on the same doc and assert it wasn't loaded into the cache as it doesn't exist -func TestBackingStoreCV(t *testing.T) { - cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &testBackingStore{[]string{"not_found"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - - // Get Rev for the first time - miss cache, but fetch the doc and revision to store - cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} - docRev, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) - assert.NoError(t, err) - assert.Equal(t, "doc1", docRev.DocID) - assert.NotNil(t, docRev.Channels) - assert.Equal(t, "test", docRev.CV.SourceID) - assert.Equal(t, uint64(123), docRev.CV.VersionCAS) - assert.Equal(t, int64(0), cacheHitCounter.Value()) - assert.Equal(t, int64(1), cacheMissCounter.Value()) - assert.Equal(t, int64(1), getDocumentCounter.Value()) - assert.Equal(t, int64(1), getRevisionCounter.Value()) - - // Perform a get on the same doc as above, check that we get cache hit - docRev, err = cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) - assert.NoError(t, err) - assert.Equal(t, "doc1", docRev.DocID) - assert.Equal(t, "test", docRev.CV.SourceID) - assert.Equal(t, uint64(123), docRev.CV.VersionCAS) - assert.Equal(t, int64(1), cacheHitCounter.Value()) - assert.Equal(t, int64(1), cacheMissCounter.Value()) - assert.Equal(t, int64(1), getDocumentCounter.Value()) - assert.Equal(t, int64(1), getRevisionCounter.Value()) - - // Doc doesn't exist, so miss the cache, and fail when getting the doc - cv = CurrentVersionVector{SourceID: "test11", VersionCAS: 100} - docRev, err = cache.GetWithCV(base.TestCtx(t), "not_found", &cv, RevCacheOmitBody, RevCacheOmitDelta) - 
assertHTTPError(t, err, 404) - assert.Nil(t, docRev.BodyBytes) - assert.Equal(t, int64(1), cacheHitCounter.Value()) - assert.Equal(t, int64(2), cacheMissCounter.Value()) - assert.Equal(t, int64(2), getDocumentCounter.Value()) - assert.Equal(t, int64(1), getRevisionCounter.Value()) - - // Rev still doesn't exist, make sure it wasn't cached - docRev, err = cache.GetWithCV(base.TestCtx(t), "not_found", &cv, RevCacheOmitBody, RevCacheOmitDelta) + docRev, err = cache.Get(base.TestCtx(t), "Peter", "1-abc", RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) assert.Nil(t, docRev.BodyBytes) assert.Equal(t, int64(1), cacheHitCounter.Value()) @@ -393,15 +255,15 @@ func TestBypassRevisionCache(t *testing.T) { assert.False(t, ok) // Get non-existing doc - _, err = rc.GetWithRev(base.TestCtx(t), "invalid", rev1, RevCacheOmitBody, RevCacheOmitDelta) + _, err = rc.Get(base.TestCtx(t), "invalid", rev1, RevCacheOmitBody, RevCacheOmitDelta) assert.True(t, base.IsDocNotFoundError(err)) // Get non-existing revision - _, err = rc.GetWithRev(base.TestCtx(t), key, "3-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, err = rc.Get(base.TestCtx(t), key, "3-abc", RevCacheOmitBody, RevCacheOmitDelta) assertHTTPError(t, err, 404) // Get specific revision - doc, err := rc.GetWithRev(base.TestCtx(t), key, rev1, RevCacheOmitBody, RevCacheOmitDelta) + doc, err := rc.Get(base.TestCtx(t), key, rev1, RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err) require.NotNil(t, doc) assert.Equal(t, `{"value":1234}`, string(doc.BodyBytes)) @@ -488,7 +350,7 @@ func TestPutExistingRevRevisionCacheAttachmentProperty(t *testing.T) { "value": 1235, BodyAttachments: map[string]interface{}{"myatt": map[string]interface{}{"content_type": "text/plain", "data": "SGVsbG8gV29ybGQh"}}, } - _, _, err = collection.PutExistingRevWithBody(ctx, docKey, rev2body, []string{rev2id, rev1id}, false, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, docKey, rev2body, 
[]string{rev2id, rev1id}, false) assert.NoError(t, err, "Unexpected error calling collection.PutExistingRev") // Get the raw document directly from the bucket, validate _attachments property isn't found @@ -499,7 +361,7 @@ func TestPutExistingRevRevisionCacheAttachmentProperty(t *testing.T) { assert.False(t, ok, "_attachments property still present in document body retrieved from bucket: %#v", bucketBody) // Get the raw document directly from the revcache, validate _attachments property isn't found - docRevision, err := collection.revisionCache.GetWithRev(base.TestCtx(t), docKey, rev2id, RevCacheOmitBody, RevCacheOmitDelta) + docRevision, err := collection.revisionCache.Get(base.TestCtx(t), docKey, rev2id, RevCacheOmitBody, RevCacheOmitDelta) assert.NoError(t, err, "Unexpected error calling collection.revisionCache.Get") assert.NotContains(t, docRevision.BodyBytes, BodyAttachments, "_attachments property still present in document body retrieved from rev cache: %#v", bucketBody) _, ok = docRevision.Attachments["myatt"] @@ -526,12 +388,12 @@ func TestRevisionImmutableDelta(t *testing.T) { secondDelta := []byte("modified delta") // Trigger load into cache - _, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + _, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error adding to cache") cache.UpdateDelta(base.TestCtx(t), "doc1", "1-abc", RevisionDelta{ToRevID: "rev2", DeltaBytes: firstDelta}) // Retrieve from cache - retrievedRev, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + retrievedRev, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error retrieving from cache") assert.Equal(t, "rev2", retrievedRev.Delta.ToRevID) assert.Equal(t, firstDelta, retrievedRev.Delta.DeltaBytes) @@ -542,7 +404,7 @@ func TestRevisionImmutableDelta(t 
*testing.T) { assert.Equal(t, firstDelta, retrievedRev.Delta.DeltaBytes) // Retrieve again, validate delta is correct - updatedRev, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) + updatedRev, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) assert.NoError(t, err, "Error retrieving from cache") assert.Equal(t, "rev3", updatedRev.Delta.ToRevID) assert.Equal(t, secondDelta, updatedRev.Delta.DeltaBytes) @@ -557,8 +419,8 @@ func TestSingleLoad(t *testing.T) { cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} cache := NewLRURevisionCache(10, &testBackingStore{nil, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc123", RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(123), SourceID: "test"}, History: Revisions{"start": 1}}) - _, err := cache.GetWithRev(base.TestCtx(t), "doc123", "1-abc", true, false) + cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc123", RevID: "1-abc", History: Revisions{"start": 1}}) + _, err := cache.Get(base.TestCtx(t), "doc123", "1-abc", true, false) assert.NoError(t, err) } @@ -567,14 +429,14 @@ func TestConcurrentLoad(t *testing.T) { cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} cache := NewLRURevisionCache(10, &testBackingStore{nil, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - cache.Put(base.TestCtx(t), DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc1", RevID: "1-abc", CV: &CurrentVersionVector{VersionCAS: uint64(1234), SourceID: "test"}, History: Revisions{"start": 1}}) + cache.Put(base.TestCtx(t), 
DocumentRevision{BodyBytes: []byte(`{"test":"1234"}`), DocID: "doc1", RevID: "1-abc", History: Revisions{"start": 1}}) // Trigger load into cache var wg sync.WaitGroup wg.Add(20) for i := 0; i < 20; i++ { go func() { - _, err := cache.GetWithRev(base.TestCtx(t), "doc1", "1-abc", true, false) + _, err := cache.Get(base.TestCtx(t), "doc1", "1-abc", true, false) assert.NoError(t, err) wg.Done() }() @@ -592,14 +454,14 @@ func TestRevisionCacheRemove(t *testing.T) { rev1id, _, err := collection.Put(ctx, "doc", Body{"val": 123}) assert.NoError(t, err) - docRev, err := collection.revisionCache.GetWithRev(base.TestCtx(t), "doc", rev1id, true, true) + docRev, err := collection.revisionCache.Get(base.TestCtx(t), "doc", rev1id, true, true) assert.NoError(t, err) assert.Equal(t, rev1id, docRev.RevID) assert.Equal(t, int64(0), db.DbStats.Cache().RevisionCacheMisses.Value()) - collection.revisionCache.RemoveWithRev("doc", rev1id) + collection.revisionCache.Remove("doc", rev1id) - docRev, err = collection.revisionCache.GetWithRev(base.TestCtx(t), "doc", rev1id, true, true) + docRev, err = collection.revisionCache.Get(base.TestCtx(t), "doc", rev1id, true, true) assert.NoError(t, err) assert.Equal(t, rev1id, docRev.RevID) assert.Equal(t, int64(1), db.DbStats.Cache().RevisionCacheMisses.Value()) @@ -620,59 +482,6 @@ func TestRevisionCacheRemove(t *testing.T) { assert.Equal(t, int64(1), db.DbStats.Cache().RevisionCacheMisses.Value()) } -// TestRevCacheOperationsCV: -// - Create doc revision, put the revision into the cache -// - Perform a get on that doc by cv and assert that it has correctly been handled -// - Updated doc revision and upsert the cache -// - Get the updated doc by cv and assert iot has been correctly handled -// - Peek the doc by cv and assert it has been found -// - Peek the rev id cache for the same doc and assert that doc also has been updated in that lookup cache -// - Remove the doc by cv, and asser that the doc is gone -func TestRevCacheOperationsCV(t 
*testing.T) { - cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - - cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} - documentRevision := DocumentRevision{ - DocID: "doc1", - RevID: "1-abc", - BodyBytes: []byte(`{"test":"1234"}`), - Channels: base.SetOf("chan1"), - History: Revisions{"start": 1}, - CV: &cv, - } - cache.Put(base.TestCtx(t), documentRevision) - - docRev, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) - require.NoError(t, err) - assert.Equal(t, "doc1", docRev.DocID) - assert.Equal(t, base.SetOf("chan1"), docRev.Channels) - assert.Equal(t, "test", docRev.CV.SourceID) - assert.Equal(t, uint64(123), docRev.CV.VersionCAS) - assert.Equal(t, int64(1), cacheHitCounter.Value()) - assert.Equal(t, int64(0), cacheMissCounter.Value()) - - documentRevision.BodyBytes = []byte(`{"test":"12345"}`) - - cache.Upsert(base.TestCtx(t), documentRevision) - - docRev, err = cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) - require.NoError(t, err) - assert.Equal(t, "doc1", docRev.DocID) - assert.Equal(t, base.SetOf("chan1"), docRev.Channels) - assert.Equal(t, "test", docRev.CV.SourceID) - assert.Equal(t, uint64(123), docRev.CV.VersionCAS) - assert.Equal(t, []byte(`{"test":"12345"}`), docRev.BodyBytes) - assert.Equal(t, int64(2), cacheHitCounter.Value()) - assert.Equal(t, int64(0), cacheMissCounter.Value()) - - // remove the doc rev from the cache and assert that the document is no longer present in cache - cache.RemoveWithCV("doc1", &cv) - assert.Equal(t, 0, len(cache.cache)) - assert.Equal(t, 0, len(cache.hlvCache)) - assert.Equal(t, 0, cache.lruList.Len()) -} - func BenchmarkRevisionCacheRead(b *testing.B) { 
base.SetUpBenchmarkLogging(b, base.LevelDebug, base.KeyAll) @@ -683,7 +492,7 @@ func BenchmarkRevisionCacheRead(b *testing.B) { // trigger load into cache for i := 0; i < 5000; i++ { - _, _ = cache.GetWithRev(ctx, fmt.Sprintf("doc%d", i), "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, _ = cache.Get(ctx, fmt.Sprintf("doc%d", i), "1-abc", RevCacheOmitBody, RevCacheOmitDelta) } b.ResetTimer() @@ -691,147 +500,7 @@ func BenchmarkRevisionCacheRead(b *testing.B) { // GET the document until test run has completed for pb.Next() { docId := fmt.Sprintf("doc%d", rand.Intn(5000)) - _, _ = cache.GetWithRev(ctx, docId, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) + _, _ = cache.Get(ctx, docId, "1-abc", RevCacheOmitBody, RevCacheOmitDelta) } }) } - -// TestLoaderMismatchInCV: -// - Get doc that is not in cache by CV to trigger a load from bucket -// - Ensure the CV passed into teh GET operation won't match the doc in teh bucket -// - Assert we get error and the value is not loaded into the cache -func TestLoaderMismatchInCV(t *testing.T) { - cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - - // create cv with incorrect version to the one stored in backing store - cv := CurrentVersionVector{SourceID: "test", VersionCAS: 1234} - - _, err := cache.GetWithCV(base.TestCtx(t), "doc1", &cv, RevCacheOmitBody, RevCacheOmitDelta) - require.Error(t, err) - assert.ErrorContains(t, err, "mismatch between specified current version and fetched document current version for doc") - assert.Equal(t, int64(0), cacheHitCounter.Value()) - assert.Equal(t, int64(1), cacheMissCounter.Value()) - assert.Equal(t, 0, cache.lruList.Len()) - assert.Equal(t, 0, len(cache.hlvCache)) - assert.Equal(t, 0, len(cache.cache)) -} - -// 
TestConcurrentLoadByCVAndRevOnCache: -// - Create cache -// - Now perform two concurrent Gets, one by CV and one by revid on a document that doesn't exist in the cache -// - This will trigger two concurrent loads from bucket in the CV code path and revid code path -// - In doing so we will have two processes trying to update lookup maps at the same time and a race condition will appear -// - In doing so will cause us to potentially have two of teh same elements the cache, one with nothing referencing it -// - Assert after both gets are processed, that the cache only has one element in it and that both lookup maps have only one -// element -// - Grab the single element in the list and assert that both maps point to that element in the cache list -func TestConcurrentLoadByCVAndRevOnCache(t *testing.T) { - cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - - ctx := base.TestCtx(t) - - wg := sync.WaitGroup{} - wg.Add(2) - - cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} - go func() { - _, err := cache.GetWithRev(ctx, "doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) - require.NoError(t, err) - wg.Done() - }() - - go func() { - _, err := cache.GetWithCV(ctx, "doc1", &cv, RevCacheOmitBody, RevCacheIncludeDelta) - require.NoError(t, err) - wg.Done() - }() - - wg.Wait() - - revElement := cache.cache[IDAndRev{RevID: "1-abc", DocID: "doc1"}] - cvElement := cache.hlvCache[IDandCV{DocID: "doc1", Source: "test", Version: 123}] - assert.Equal(t, 1, cache.lruList.Len()) - assert.Equal(t, 1, len(cache.cache)) - assert.Equal(t, 1, len(cache.hlvCache)) - // grab the single elem in the cache list - cacheElem := cache.lruList.Front() - // assert that both maps point to the same element in cache list - 
assert.Equal(t, cacheElem, cvElement) - assert.Equal(t, cacheElem, revElement) -} - -// TestGetActive: -// - Create db, create a doc on the db -// - Call GetActive pn the rev cache and assert that the rev and cv are correct -func TestGetActive(t *testing.T) { - db, ctx := setupTestDB(t) - defer db.Close(ctx) - collection := GetSingleDatabaseCollectionWithUser(t, db) - - rev1id, doc, err := collection.Put(ctx, "doc", Body{"val": 123}) - require.NoError(t, err) - - expectedCV := CurrentVersionVector{ - SourceID: db.BucketUUID, - VersionCAS: doc.Cas, - } - - // remove the entry form the rev cache to force teh cache to not have the active version in it - collection.revisionCache.RemoveWithCV("doc", &expectedCV) - - // call get active to get teh active version from the bucket - docRev, err := collection.revisionCache.GetActive(base.TestCtx(t), "doc", true) - assert.NoError(t, err) - assert.Equal(t, rev1id, docRev.RevID) - assert.Equal(t, expectedCV, *docRev.CV) -} - -// TestConcurrentPutAndGetOnRevCache: -// - Perform a Get with rev on the cache for a doc not in the cache -// - Concurrently perform a PUT on the cache with doc revision the same as the GET -// - Assert we get consistent cache with only 1 entry in lookup maps and the cache itself -func TestConcurrentPutAndGetOnRevCache(t *testing.T) { - cacheHitCounter, cacheMissCounter, getDocumentCounter, getRevisionCounter := base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{}, base.SgwIntStat{} - cache := NewLRURevisionCache(10, &testBackingStore{[]string{"test_doc"}, &getDocumentCounter, &getRevisionCounter}, &cacheHitCounter, &cacheMissCounter) - - ctx := base.TestCtx(t) - - wg := sync.WaitGroup{} - wg.Add(2) - - cv := CurrentVersionVector{SourceID: "test", VersionCAS: 123} - docRev := DocumentRevision{ - DocID: "doc1", - RevID: "1-abc", - BodyBytes: []byte(`{"test":"1234"}`), - Channels: base.SetOf("chan1"), - History: Revisions{"start": 1}, - CV: &cv, - } - - go func() { - _, err := cache.GetWithRev(ctx, 
"doc1", "1-abc", RevCacheOmitBody, RevCacheIncludeDelta) - require.NoError(t, err) - wg.Done() - }() - - go func() { - cache.Put(ctx, docRev) - wg.Done() - }() - - wg.Wait() - - revElement := cache.cache[IDAndRev{RevID: "1-abc", DocID: "doc1"}] - cvElement := cache.hlvCache[IDandCV{DocID: "doc1", Source: "test", Version: 123}] - - assert.Equal(t, 1, cache.lruList.Len()) - assert.Equal(t, 1, len(cache.cache)) - assert.Equal(t, 1, len(cache.hlvCache)) - cacheElem := cache.lruList.Front() - // assert that both maps point to the same element in cache list - assert.Equal(t, cacheElem, cvElement) - assert.Equal(t, cacheElem, revElement) -} diff --git a/db/revision_test.go b/db/revision_test.go index 5601dd4eda..683e477a4d 100644 --- a/db/revision_test.go +++ b/db/revision_test.go @@ -131,7 +131,7 @@ func TestBackupOldRevision(t *testing.T) { // create rev 2 and check backups for both revs rev2ID := "2-abc" - _, _, err = collection.PutExistingRevWithBody(ctx, docID, Body{"test": true, "updated": true}, []string{rev2ID, rev1ID}, true, ExistingVersionWithUpdateToHLV) + _, _, err = collection.PutExistingRevWithBody(ctx, docID, Body{"test": true, "updated": true}, []string{rev2ID, rev1ID}, true) require.NoError(t, err) // now in all cases we'll have rev 1 backed up (for at least 5 minutes) diff --git a/rest/api_test.go b/rest/api_test.go index f5ef62d435..377e8b06d8 100644 --- a/rest/api_test.go +++ b/rest/api_test.go @@ -2746,97 +2746,6 @@ func TestNullDocHandlingForMutable1xBody(t *testing.T) { assert.Contains(t, err.Error(), "null doc body for doc") } -// TestPutDocUpdateVersionVector: -// - Put a doc and assert that the versions and the source for the hlv is correctly updated -// - Update that doc and assert HLV has also been updated -// - Delete the doc and assert that the HLV has been updated in deletion event -func TestPutDocUpdateVersionVector(t *testing.T) { - rt := NewRestTester(t, nil) - defer rt.Close() - - bucketUUID, err := rt.GetDatabase().Bucket.UUID() - 
require.NoError(t, err) - - resp := rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"key": "value"}`) - RequireStatus(t, resp, http.StatusCreated) - - syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS := base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, bucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) - - // Put a new revision of this doc and assert that the version vector SourceID and Version is updated - resp = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1?rev="+syncData.CurrentRev, `{"key1": "value1"}`) - RequireStatus(t, resp, http.StatusCreated) - - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS = base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, bucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) - - // Delete doc and assert that the version vector SourceID and Version is updated - resp = rt.SendAdminRequest(http.MethodDelete, "/{{.keyspace}}/doc1?rev="+syncData.CurrentRev, "") - RequireStatus(t, resp, http.StatusOK) - - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS = base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, bucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) -} - -// TestHLVOnPutWithImportRejection: -// - Put a doc successfully and assert the HLV is updated correctly -// - Put a doc that will be rejected by the custom import filter -// - Assert that the HLV values on the sync data are still correctly updated/preserved -func TestHLVOnPutWithImportRejection(t *testing.T) { - base.SetUpTestLogging(t, 
base.LevelDebug, base.KeyImport) - importFilter := `function (doc) { return doc.type == "mobile"}` - rtConfig := RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - AutoImport: false, - ImportFilter: &importFilter, - }}, - } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - bucketUUID, err := rt.GetDatabase().Bucket.UUID() - require.NoError(t, err) - - resp := rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"type": "mobile"}`) - RequireStatus(t, resp, http.StatusCreated) - - syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS := base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, bucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) - - // Put a doc that will be rejected by the import filter on the attempt to perform on demand import for write - resp = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc2", `{"type": "not-mobile"}`) - RequireStatus(t, resp, http.StatusCreated) - - // assert that the hlv is correctly updated and in tact after the import was cancelled on the doc - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc2") - assert.NoError(t, err) - uintCAS = base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, bucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) -} - func TestTombstoneCompactionAPI(t *testing.T) { rt := NewRestTester(t, nil) defer rt.Close() diff --git a/rest/attachment_test.go b/rest/attachment_test.go index 053139deca..0b480d4127 100644 --- a/rest/attachment_test.go +++ b/rest/attachment_test.go @@ -1060,7 +1060,6 @@ func TestAttachmentContentType(t *testing.T) { } func TestBasicAttachmentRemoval(t *testing.T) { - t.Skip("Disabled pending CBG-3503") rt := NewRestTester(t, 
&RestTesterConfig{GuestEnabled: true}) defer rt.Close() @@ -2224,7 +2223,6 @@ func TestAttachmentDeleteOnPurge(t *testing.T) { } func TestAttachmentDeleteOnExpiry(t *testing.T) { - t.Skip("Disabled pending CBG-3503") rt := NewRestTester(t, nil) defer rt.Close() @@ -2262,204 +2260,184 @@ func TestAttachmentDeleteOnExpiry(t *testing.T) { } func TestUpdateExistingAttachment(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } + }) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) + doc1Version := rt.PutDoc(doc1ID, `{}`) + doc2Version := rt.PutDoc(doc2ID, `{}`) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) - defer btc.Close() - // Add doc1 and doc2 - doc1Version := btc.rt.PutDoc(doc1ID, `{}`) - doc2Version := btc.rt.PutDoc(doc2ID, `{}`) + require.NoError(t, rt.WaitForPendingChanges()) - require.NoError(t, btc.rt.WaitForPendingChanges()) - - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) - _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) - require.True(t, ok) - _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) - require.True(t, ok) + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok := btc.WaitForVersion(doc1ID, doc1Version) + require.True(t, ok) + _, ok = btc.WaitForVersion(doc2ID, doc2Version) + require.True(t, ok) - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := 
base64.StdEncoding.EncodeToString([]byte("attachmentB")) - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) - _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) - require.NoError(t, err) + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", 
"_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) + require.NoError(t, err) - assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - doc1, err := btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), doc1ID, db.DocUnmarshalAll) - assert.NoError(t, err) + doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + assert.NoError(t, err) - assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) - req := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, "attachmentB", string(req.BodyBytes())) - }) + req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, "attachmentB", string(req.BodyBytes())) } // TestPushUnknownAttachmentAsStub sets revpos to an older generation, for an attachment that doesn't exist on the server. // Verifies that getAttachment is triggered, and attachment is properly persisted. 
func TestPushUnknownAttachmentAsStub(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - const doc1ID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) + }) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + assert.NoError(t, err) + defer btc.Close() - opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) - defer btc.Close() - // Add doc1 and doc2 - doc1Version := btc.rt.PutDoc(doc1ID, `{}`) + const doc1ID = "doc1" + doc1Version := rt.PutDoc(doc1ID, `{}`) - require.NoError(t, btc.rt.WaitForPendingChanges()) + require.NoError(t, rt.WaitForPendingChanges()) - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) + err = btc.StartOneshotPull() + assert.NoError(t, err) - _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) - require.True(t, ok) + _, ok := btc.WaitForVersion(doc1ID, doc1Version) + require.True(t, ok) - // force attachment into test client's store to validate it's fetched - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - contentType := "text/plain" + // force attachment into test client's store to validate it's fetched + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + contentType := "text/plain" - length, digest, err := btcRunner.saveAttachment(btc.id, contentType, attachmentAData) - require.NoError(t, err) - // Update doc1, include reference to non-existing attachment with recent revpos - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) - require.NoError(t, err) + length, digest, err := 
btc.saveAttachment(contentType, attachmentAData) + require.NoError(t, err) + // Update doc1, include reference to non-existing attachment with recent revpos + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) + require.NoError(t, err) - require.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + require.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + + // verify that attachment exists on document and was persisted + attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) - // verify that attachment exists on document and was persisted - attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) - }) } func TestMinRevPosWorkToAvoidUnnecessaryProveAttachment(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ AllowConflicts: base.BoolPtr(true), }, }, - } - - btcRunner := NewBlipTesterClientRunner(t) - const docID = "doc" + }) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) - defer btc.Close() - // Push an initial rev with attachment data - initialVersion := btc.rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": 
"aGVsbG8gd29ybGQ="}}}`) - err := btc.rt.WaitForPendingChanges() - assert.NoError(t, err) + // Push an initial rev with attachment data + const docID = "doc" + initialVersion := rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err = rt.WaitForPendingChanges() + assert.NoError(t, err) - // Replicate data to client and ensure doc arrives - err = btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) - _, found := btcRunner.WaitForVersion(btc.id, docID, initialVersion) - assert.True(t, found) + // Replicate data to client and ensure doc arrives + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, found := btc.WaitForVersion(docID, initialVersion) + assert.True(t, found) - // Push a revision with a bunch of history simulating doc updated on mobile device - // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment - proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() - revid, err := btcRunner.PushRevWithHistory(btc.id, docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + // Push a revision with a bunch of history simulating doc updated on mobile device + // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment + proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() + revid, err := btc.PushRevWithHistory(docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) - 
// Push another bunch of history - _, err = btcRunner.PushRevWithHistory(btc.id, docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) - }) + // Push another bunch of history + _, err = btc.PushRevWithHistory(docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) } func TestAttachmentWithErroneousRevPos(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - - btcRunner := NewBlipTesterClientRunner(t) + }) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) - defer btc.Close() - // Create rev 1 with the hello.txt attachment - const docID = "doc" - version := btc.rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err := btc.rt.WaitForPendingChanges() - assert.NoError(t, err) + // Create rev 1 with the hello.txt attachment + const docID = "doc" + version := rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err = rt.WaitForPendingChanges() + assert.NoError(t, err) - // Pull rev and attachment down to client - err = btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) - _, found := 
btcRunner.WaitForVersion(btc.id, docID, version) - assert.True(t, found) + // Pull rev and attachment down to client + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, found := btc.WaitForVersion(docID, version) + assert.True(t, found) - // Add an attachment to client - btcRunner.AttachmentsLock(btc.id).Lock() - btcRunner.Attachments(btc.id)["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") - btcRunner.AttachmentsLock(btc.id).Unlock() + // Add an attachment to client + btc.AttachmentsLock().Lock() + btc.Attachments()["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") + btc.AttachmentsLock().Unlock() - // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment - _, err = btcRunner.PushRevWithHistory(btc.id, docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) - require.NoError(t, err) + // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment + _, err = btc.PushRevWithHistory(docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) + require.NoError(t, err) - // Ensure message and attachment is pushed up - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + // Ensure message and attachment is pushed up + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) - // Get the attachment and ensure the data is updated - resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) - }) + // Get the attachment and ensure the data is updated + resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, "goodbye 
cruel world", string(resp.BodyBytes())) } // CBG-2004: Test that prove attachment over Blip works correctly when receiving a ErrAttachmentNotFound @@ -2600,79 +2578,74 @@ func TestPutInvalidAttachment(t *testing.T) { // validates that proveAttachment isn't being invoked when the attachment is already present and the // digest doesn't change, regardless of revpos. func TestCBLRevposHandling(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } + }) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + assert.NoError(t, err) + defer btc.Close() - btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) + doc1Version := rt.PutDoc(doc1ID, `{}`) + doc2Version := rt.PutDoc(doc2ID, `{}`) + require.NoError(t, rt.WaitForPendingChanges()) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) - defer btc.Close() - - doc1Version := btc.rt.PutDoc(doc1ID, `{}`) - doc2Version := btc.rt.PutDoc(doc2ID, `{}`) - require.NoError(t, btc.rt.WaitForPendingChanges()) + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok := btc.WaitForVersion(doc1ID, doc1Version) + require.True(t, ok) + _, ok = btc.WaitForVersion(doc2ID, doc2Version) + require.True(t, ok) - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) - _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) - require.True(t, ok) - _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) - require.True(t, ok) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := 
base64.StdEncoding.EncodeToString([]byte("attachmentB")) + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) - assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + // Update doc1, don't change attachment, use correct revpos + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) + require.NoError(t, err) - // Update doc1, don't change attachment, use correct revpos - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": 
"val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) - require.NoError(t, err) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. + doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) + require.NoError(t, err) - // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. - doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) - require.NoError(t, err) + // Validate attachment exists + attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) - // Validate attachment exists - attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + attachmentPushCount := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + // Update doc1, change attachment digest with CBL revpos=generation. 
Should getAttachment + _, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) + require.NoError(t, err) - attachmentPushCount := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment - _, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) - require.NoError(t, err) + // Validate attachment exists and is updated + attResponse = rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) - // Validate attachment exists and is updated - attResponse = btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + attachmentPushCountAfter := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) - attachmentPushCountAfter := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) - }) } // Helper_Functions diff --git a/rest/blip_api_attachment_test.go b/rest/blip_api_attachment_test.go index 2f63d13880..266e580678 100644 --- a/rest/blip_api_attachment_test.go +++ b/rest/blip_api_attachment_test.go @@ -43,63 +43,56 @@ func TestBlipPushPullV2AttachmentV2Client(t *testing.T) { }, GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btcRunner := NewBlipTesterClientRunner(t) - // given this test is for v2 protocol, skip version vector 
test - btcRunner.SkipVersionVectorInitialization = true - const docID = "doc1" - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() + opts := &BlipTesterClientOpts{} + opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} + btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) + require.NoError(t, err) + defer btc.Close() - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) + err = btc.StartPull() + assert.NoError(t, err) + const docID = "doc1" - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := btc.rt.PutDoc(docID, bodyText) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) - data, ok := btcRunner.WaitForVersion(btc.id, docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + data, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) - // Update the replicated doc at client along with keeping the same attachment stub. 
- bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) - require.NoError(t, err) + // Update the replicated doc at client along with keeping the same attachment stub. + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btc.PushRev(docID, version, []byte(bodyText)) + require.NoError(t, err) - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) - respBody := btc.rt.GetDocVersion(docID, version) + respBody := rt.GetDocVersion(docID, version) - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 
"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) - assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) - }) + assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) } // Test pushing and pulling v2 attachments with v3 client @@ -120,59 +113,54 @@ func TestBlipPushPullV2AttachmentV3Client(t *testing.T) { }, GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - btcRunner := NewBlipTesterClientRunner(t) + err = btc.StartPull() + assert.NoError(t, err) const docID = "doc1" - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) - - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := btc.rt.PutDoc(docID, bodyText) - - data, ok := btcRunner.WaitForVersion(btc.id, docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client along with keeping the same attachment stub. 
- bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := btc.rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) - }) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) + + data, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client along with keeping the same attachment stub. 
+ bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btc.PushRev(docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) } // TestBlipProveAttachmentV2 ensures that CBL's proveAttachment for deduplication is working correctly even for v2 attachments which aren't de-duped on the server side. 
@@ -181,6 +169,17 @@ func TestBlipProveAttachmentV2(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + require.NoError(t, err) + defer btc.Close() + + err = btc.StartPull() + assert.NoError(t, err) const ( doc1ID = "doc1" @@ -197,45 +196,29 @@ func TestBlipProveAttachmentV2(t *testing.T) { attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - btcRunner := NewBlipTesterClientRunner(t) - btcRunner.SkipVersionVectorInitialization = true // v2 protocol test + // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, + // CBL will still de-dupe attachments based on digest, so will still try proveAttachment for the 2nd. + doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version := rt.PutDoc(doc1ID, doc1Body) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + data, ok := btc.WaitForVersion(doc1ID, doc1Version) + require.True(t, ok) + bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - defer btc.Close() - - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) - - // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, - // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd.
- doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version := btc.rt.PutDoc(doc1ID, doc1Body) - - data, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) - require.True(t, ok) - bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) - - // create doc2 now that we know the client has the attachment - doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version := btc.rt.PutDoc(doc2ID, doc2Body) - - data, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) - require.True(t, ok) - bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) - - assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) - assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) - assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) - assert.Equal(t, int64(len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) - }) + // create doc2 now that we know the client has the attachment + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version := rt.PutDoc(doc2ID, doc2Body) + + data, ok = btc.WaitForVersion(doc2ID, doc2Version) + require.True(t, ok) + bodyTextExpected = 
fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) + assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) + assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) + assert.Equal(t, int64(len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) } // TestBlipProveAttachmentV2Push ensures that CBL's attachment deduplication is ignored for push replications - resulting in new server-side digests and duplicated attachment data (v2 attachment format). @@ -244,6 +227,15 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + require.NoError(t, err) + defer btc.Close() + const ( doc1ID = "doc1" doc2ID = "doc2" @@ -259,38 +251,26 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { // attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - btcRunner := NewBlipTesterClientRunner(t) - btcRunner.SkipVersionVectorInitialization = true // v2 protocol test + // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. 
+ doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version, err := btc.PushRev(doc1ID, EmptyDocVersion(), []byte(doc1Body)) + require.NoError(t, err) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + err = rt.WaitForVersion(doc1ID, doc1Version) + require.NoError(t, err) - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - defer btc.Close() - // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. - doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version, err := btcRunner.PushRev(btc.id, doc1ID, EmptyDocVersion(), []byte(doc1Body)) - require.NoError(t, err) - - err = btc.rt.WaitForVersion(doc1ID, doc1Version) - require.NoError(t, err) - - // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
- doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version, err := btcRunner.PushRev(btc.id, doc2ID, EmptyDocVersion(), []byte(doc2Body)) - require.NoError(t, err) - - err = btc.rt.WaitForVersion(doc2ID, doc2Version) - require.NoError(t, err) - - assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) - assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) - assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(2*len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) - }) + // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version, err := btc.PushRev(doc2ID, EmptyDocVersion(), []byte(doc2Body)) + require.NoError(t, err) + + err = rt.WaitForVersion(doc2ID, doc2Version) + require.NoError(t, err) + + assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) + assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) + assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(2*len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) } func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { @@ -298,139 +278,130 @@ func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer 
btc.Close() - btcRunner := NewBlipTesterClientRunner(t) + err = btc.StartPull() + assert.NoError(t, err) const docID = "doc1" - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) - - // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. - bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "2-abc", revId) - - // Wait for the documents to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, - // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err = btcRunner.PushRevWithHistory(btc.id, docID, revId, []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(4) - assert.True(t, ok) - - resp = btc.rt.SendAdminRequest(http.MethodGet, 
"/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) - }) + // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. 
+ bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btc.PushRevWithHistory(docID, "", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "2-abc", revId) + + // Wait for the documents to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, + // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err = btc.PushRevWithHistory(docID, revId, []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(4) + assert.True(t, ok) + + resp = rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + 
require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. + require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) } func TestBlipPushPullNewAttachmentNoCommonAncestor(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := RestTesterConfig{ GuestEnabled: true, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + err = btc.StartPull() + assert.NoError(t, err) const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) - - // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. - // rev tree pruning on the CBL side, so 1-abc no longer exists. 
- // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 - bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "2-abc", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(4), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. 
- require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) - }) + + // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. + // rev tree pruning on the CBL side, so 1-abc no longer exists. + // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btc.PushRevWithHistory(docID, "2-abc", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(4), hello["revpos"]) 
+ assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. + require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) } // Test Attachment replication behavior described here: https://github.com/couchbase/couchbase-lite-core/wiki/Replication-Protocol @@ -536,181 +507,163 @@ func TestPutAttachmentViaBlipGetViaBlip(t *testing.T) { // TestBlipAttachNameChange tests CBL handling - attachments with changed names are sent as stubs, and not new attachments func TestBlipAttachNameChange(t *testing.T) { - base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client1.Close() - - attachmentA := []byte("attachmentA") - attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) - digest := db.Sha1DigestKey(attachmentA) - - // Push initial attachment data - version, err := btcRunner.PushRev(client1.id, "doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) - require.NoError(t, err) - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) - bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attachmentA) - - // Simulate changing only the 
attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - err = client1.rt.WaitForVersion("doc", version) - require.NoError(t, err) - - // Check if attachment is still in bucket - bucketAttachmentA, _, err = client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) - assert.NoError(t, err) - assert.Equal(t, bucketAttachmentA, attachmentA) - - resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attachmentA, resp.BodyBytes()) }) + defer rt.Close() + + client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client1.Close() + base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + + attachmentA := []byte("attachmentA") + attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) + digest := db.Sha1DigestKey(attachmentA) + + // Push initial attachment data + version, err := client1.PushRev("doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) + require.NoError(t, err) + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) + bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attachmentA) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + version, err = client1.PushRev("doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + err = 
rt.WaitForVersion("doc", version) + require.NoError(t, err) + + // Check if attachment is still in bucket + bucketAttachmentA, _, err = rt.GetSingleDataStore().GetRaw(attachmentAKey) + assert.NoError(t, err) + assert.Equal(t, bucketAttachmentA, attachmentA) + + resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attachmentA, resp.BodyBytes()) } // TestBlipLegacyAttachNameChange ensures that CBL name changes for legacy attachments are handled correctly func TestBlipLegacyAttachNameChange(t *testing.T) { - base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client1.Close() - // Create document in the bucket with a legacy attachment - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) - - // Get the document and grab the revID. 
- docVersion, _ := client1.rt.GetDoc(docID) - - // Store the document and attachment on the test client - err := btcRunner.StoreRevOnClient(client1.id, docID, docVersion.RevID, rawDoc) - - require.NoError(t, err) - btcRunner.AttachmentsLock(client1.id).Lock() - btcRunner.Attachments(client1.id)[digest] = attBody - btcRunner.AttachmentsLock(client1.id).Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - docVersion, err = btcRunner.PushRev(client1.id, "doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = client1.rt.WaitForVersion("doc", docVersion) - require.NoError(t, err) - - resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) }) + defer rt.Close() + client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client1.Close() + base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + + // Create document in the bucket with a legacy attachment + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) + + // Get the document and grab the revID. 
+ docVersion, _ := rt.GetDoc(docID) + + // Store the document and attachment on the test client + err = client1.StoreRevOnClient(docID, docVersion.RevID, rawDoc) + + require.NoError(t, err) + client1.AttachmentsLock().Lock() + client1.Attachments()[digest] = attBody + client1.AttachmentsLock().Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + docVersion, err = client1.PushRev("doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = rt.WaitForVersion("doc", docVersion) + require.NoError(t, err) + + resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) } // TestBlipLegacyAttachDocUpdate ensures that CBL updates for documents associated with legacy attachments are handled correctly func TestBlipLegacyAttachDocUpdate(t *testing.T) { - base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client1.Close() - // Create document in the bucket with a legacy attachment. 
Properties here align with rawDocWithAttachmentAndSyncMeta - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - attName := "hi.txt" - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) - - version, _ := client1.rt.GetDoc(docID) - - // Store the document and attachment on the test client - err := btcRunner.StoreRevOnClient(client1.id, docID, version.RevID, rawDoc) - require.NoError(t, err) - btcRunner.AttachmentsLock(client1.id).Lock() - btcRunner.Attachments(client1.id)[digest] = attBody - btcRunner.AttachmentsLock(client1.id).Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - dataStore := client1.rt.GetSingleDataStore() - bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Update the document, leaving body intact - version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = client1.rt.WaitForVersion("doc", version) - require.NoError(t, err) - - resp := client1.rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) - - // Validate that the attachment hasn't been migrated to V2 - v1Key := db.MakeAttachmentKey(1, "doc", digest) - v1Body, _, err := dataStore.GetRaw(v1Key) - require.NoError(t, err) - require.EqualValues(t, attBody, v1Body) - - v2Key := db.MakeAttachmentKey(2, "doc", digest) - _, _, err = dataStore.GetRaw(v2Key) - require.Error(t, err) - // Confirm correct type of error for both integration test and 
Walrus - if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { - var keyValueErr *gocb.KeyValueError - require.True(t, errors.As(err, &keyValueErr)) - //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) - require.Equal(t, keyValueErr.DocumentID, v2Key) - } }) + defer rt.Close() + client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client1.Close() + base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + + // Create document in the bucket with a legacy attachment. Properties here align with rawDocWithAttachmentAndSyncMeta + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + attName := "hi.txt" + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) + + version, _ := rt.GetDoc(docID) + + // Store the document and attachment on the test client + err = client1.StoreRevOnClient(docID, version.RevID, rawDoc) + require.NoError(t, err) + client1.AttachmentsLock().Lock() + client1.Attachments()[digest] = attBody + client1.AttachmentsLock().Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + dataStore := rt.GetSingleDataStore() + bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Update the document, leaving body intact + version, err = client1.PushRev("doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = rt.WaitForVersion("doc", version) + require.NoError(t, err) + + resp := rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", 
attName), "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + + // Validate that the attachment hasn't been migrated to V2 + v1Key := db.MakeAttachmentKey(1, "doc", digest) + v1Body, _, err := dataStore.GetRaw(v1Key) + require.NoError(t, err) + require.EqualValues(t, attBody, v1Body) + + v2Key := db.MakeAttachmentKey(2, "doc", digest) + _, _, err = dataStore.GetRaw(v2Key) + require.Error(t, err) + // Confirm correct type of error for both integration test and Walrus + if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { + var keyValueErr *gocb.KeyValueError + require.True(t, errors.As(err, &keyValueErr)) + //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) + require.Equal(t, keyValueErr.DocumentID, v2Key) + } } // TestAttachmentComputeStat: @@ -723,33 +676,31 @@ func TestAttachmentComputeStat(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() + opts := &BlipTesterClientOpts{} + opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} + btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) + require.NoError(t, err) + defer btc.Close() + syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() - syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() + err = btc.StartPull() + assert.NoError(t, err) + const docID = "doc1" - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) + // Create doc revision with attachment on SG. 
+ bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := btc.rt.PutDoc(docID, bodyText) + // Wait for the document to be replicated to client. + data, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) - // Wait for the document to be replicated to client. - data, ok := btcRunner.WaitForVersion(btc.id, docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + // assert the attachment read compute stat is incremented + require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) - // assert the attachment read compute stat is incremented - require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) - }) } diff --git a/rest/blip_api_collections_test.go b/rest/blip_api_collections_test.go index 5663e1227e..7839daa4ee 100644 --- a/rest/blip_api_collections_test.go +++ b/rest/blip_api_collections_test.go @@ -28,344 +28,322 @@ func TestBlipGetCollections(t *testing.T) { // checkpointIDWithError := "checkpointError" const defaultScopeAndCollection = "_default._default" - rtConfig := &RestTesterConfig{GuestEnabled: true} - btcRunner := NewBlipTesterClientRunner(t) + rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{GuestEnabled: true}, 1) + defer rt.Close() - btcRunner.Run(func(t *testing.T, 
SupportedBLIPProtocols []string) { - rt := NewRestTesterMultipleCollections(t, rtConfig, 1) - defer rt.Close() - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + btc, err := NewBlipTesterClientOptsWithRT(t, rt, + &BlipTesterClientOpts{ SkipCollectionsInitialization: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := btc.rt.GetSingleTestDatabaseCollection() - scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - testCases := []struct { - name string - requestBody db.GetCollectionsRequestBody - resultBody []db.Body - errorCode string - }{ - { - name: "noDocInDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{defaultScopeAndCollection}, - }, - resultBody: []db.Body{nil}, - errorCode: "", + }, + ) + require.NoError(t, err) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := rt.GetSingleTestDatabaseCollection() + scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + testCases := []struct { + name string + requestBody db.GetCollectionsRequestBody + resultBody []db.Body + errorCode string + }{ + { + name: "noDocInDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{defaultScopeAndCollection}, }, - { - name: "mismatchedLengthOnInput", - requestBody: 
db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id", "id2"}, - Collections: []string{defaultScopeAndCollection}, - }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), + resultBody: []db.Body{nil}, + errorCode: "", + }, + { + name: "mismatchedLengthOnInput", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id", "id2"}, + Collections: []string{defaultScopeAndCollection}, }, - { - name: "inDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{defaultScopeAndCollection}, - }, - resultBody: []db.Body{nil}, - errorCode: "", + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), + }, + { + name: "inDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{defaultScopeAndCollection}, }, - { - name: "badScopeSpecificationEmptyString", - // bad scope specification - empty string - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{""}, - }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), + resultBody: []db.Body{nil}, + errorCode: "", + }, + { + name: "badScopeSpecificationEmptyString", + // bad scope specification - empty string + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{""}, }, - { - name: "presentNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{scopeAndCollection}, - }, - resultBody: []db.Body{checkpoint1Body}, - errorCode: "", + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), + }, + { + name: "presentNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: 
[]string{scopeAndCollection}, }, - { - name: "unseenInNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{scopeAndCollection}, - }, - resultBody: []db.Body{db.Body{}}, - errorCode: "", + resultBody: []db.Body{checkpoint1Body}, + errorCode: "", + }, + { + name: "unseenInNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{scopeAndCollection}, }, - // { - // name: "checkpointExistsWithErrorInNonDefaultCollection", - // requestBody: db.GetCollectionsRequestBody{ - // CheckpointIDs: []string{checkpointIDWithError}, - // Collections: []string{scopeAndCollection}, - // }, - // resultBody: []db.Body{nil}, - // errorCode: "", - // }, - } + resultBody: []db.Body{db.Body{}}, + errorCode: "", + }, + // { + // name: "checkpointExistsWithErrorInNonDefaultCollection", + // requestBody: db.GetCollectionsRequestBody{ + // CheckpointIDs: []string{checkpointIDWithError}, + // Collections: []string{scopeAndCollection}, + // }, + // resultBody: []db.Body{nil}, + // errorCode: "", + // }, + } - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the norev handler - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) - require.Equal(t, errorCode, testCase.errorCode) - if testCase.errorCode != "" { - return - } - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) - - require.Equal(t, testCase.resultBody, 
checkpoints) - }) - } - }) + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the norev handler + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) + require.Equal(t, errorCode, testCase.errorCode) + if testCase.errorCode != "" { + return + } + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + + require.Equal(t, testCase.resultBody, checkpoints) + }) + } } func TestBlipReplicationNoDefaultCollection(t *testing.T) { base.TestRequiresCollections(t) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := btc.rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - - subChangesRequest := blip.NewRequest() - subChangesRequest.SetProfile(db.MessageSubChanges) - - require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) - resp := subChangesRequest.Response() - 
require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) }) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + + subChangesRequest := blip.NewRequest() + subChangesRequest.SetProfile(db.MessageSubChanges) + + require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) + resp := subChangesRequest.Response() + require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) } func TestBlipGetCollectionsAndSetCheckpoint(t *testing.T) { base.TestRequiresCollections(t) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := btc.rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, - }) - - require.NoError(t, err) - - require.NoError(t, 
btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the GetCollections - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.False(t, hasErrorCode) - require.Equal(t, errorCode, "") - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) - require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) - - // make sure other functions get called - - requestGetCheckpoint := blip.NewRequest() - requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) - requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 - requestGetCheckpoint.Properties[db.BlipCollection] = "0" - require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) - resp = requestGetCheckpoint.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] - require.Equal(t, errorCode, "") - require.False(t, hasErrorCode) - var checkpoint db.Body - err = resp.ReadJSONBody(&checkpoint) - require.NoErrorf(t, err, "Actual error %+v", checkpoint) - - require.Equal(t, db.Body{"seq": "123"}, checkpoint) }) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, + }) + + require.NoError(t, err) + + require.NoError(t, 
btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the GetCollections + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.False(t, hasErrorCode) + require.Equal(t, errorCode, "") + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) + + // make sure other functions get called + + requestGetCheckpoint := blip.NewRequest() + requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) + requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 + requestGetCheckpoint.Properties[db.BlipCollection] = "0" + require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) + resp = requestGetCheckpoint.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] + require.Equal(t, errorCode, "") + require.False(t, hasErrorCode) + var checkpoint db.Body + err = resp.ReadJSONBody(&checkpoint) + require.NoErrorf(t, err, "Actual error %+v", checkpoint) + + require.Equal(t, db.Body{"seq": "123"}, checkpoint) + } func TestCollectionsReplication(t *testing.T) { base.TestRequiresCollections(t) - rtConfig := &RestTesterConfig{ + rt := NewRestTester(t, &RestTesterConfig{ GuestEnabled: true, - } - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + }) + defer rt.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - version := btc.rt.PutDoc(docID, "{}") - require.NoError(t, btc.rt.WaitForPendingChanges()) + 
const docID = "doc1" + version := rt.PutDoc(docID, "{}") + require.NoError(t, rt.WaitForPendingChanges()) - btcCollection := btcRunner.SingleCollection(btc.id) + btcCollection := btc.SingleCollection() - err := btcCollection.StartOneshotPull() - require.NoError(t, err) + err = btcCollection.StartOneshotPull() + require.NoError(t, err) - _, ok := btcCollection.WaitForVersion(docID, version) - require.True(t, ok) - }) + _, ok := btcCollection.WaitForVersion(docID, version) + require.True(t, ok) } func TestBlipReplicationMultipleCollections(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ GuestEnabled: true, - } - btcRunner := NewBlipTesterClientRunner(t) + }, 2) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTesterMultipleCollections(t, rtConfig, 2) - defer rt.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() + docName := "doc1" + body := `{"foo":"bar"}` + versions := make([]DocVersion, 0, len(rt.GetKeyspaces())) + for _, keyspace := range rt.GetKeyspaces() { + resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) + RequireStatus(t, resp, http.StatusCreated) + versions = append(versions, DocVersionFromPutResponse(t, resp)) - docName := "doc1" - body := `{"foo":"bar"}` - versions := make([]DocVersion, 0, len(btc.rt.GetKeyspaces())) - for _, keyspace := range btc.rt.GetKeyspaces() { - resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) - RequireStatus(t, resp, http.StatusCreated) - versions = append(versions, DocVersionFromPutResponse(t, resp)) - } - require.NoError(t, btc.rt.WaitForPendingChanges()) + } + require.NoError(t, rt.WaitForPendingChanges()) - // start 
all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartPull()) - } + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartPull()) + } - for i, collectionClient := range btc.collectionClients { - msg, ok := collectionClient.WaitForVersion(docName, versions[i]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } + for i, collectionClient := range btc.collectionClients { + msg, ok := collectionClient.WaitForVersion(docName, versions[i]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } - }) } func TestBlipReplicationMultipleCollectionsMismatchedDocSizes(t *testing.T) { - rtConfig := &RestTesterConfig{ + rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ GuestEnabled: true, - } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTesterMultipleCollections(t, rtConfig, 2) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - body := `{"foo":"bar"}` - collectionDocIDs := make(map[string][]string) - collectionVersions := make(map[string][]DocVersion) - require.Len(t, btc.rt.GetKeyspaces(), 2) - for i, keyspace := range btc.rt.GetKeyspaces() { - // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one - docCount := 10 - if i == 0 { - docCount = 1 - } - blipName := 
btc.rt.getCollectionsForBLIP()[i] - for j := 0; j < docCount; j++ { - docName := fmt.Sprintf("doc%d", j) - resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) - RequireStatus(t, resp, http.StatusCreated) - - version := DocVersionFromPutResponse(t, resp) - collectionVersions[blipName] = append(collectionVersions[blipName], version) - collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) - } + }, 2) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + + body := `{"foo":"bar"}` + collectionDocIDs := make(map[string][]string) + collectionVersions := make(map[string][]DocVersion) + require.Len(t, rt.GetKeyspaces(), 2) + for i, keyspace := range rt.GetKeyspaces() { + // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one + docCount := 10 + if i == 0 { + docCount = 1 } - require.NoError(t, btc.rt.WaitForPendingChanges()) + blipName := rt.getCollectionsForBLIP()[i] + for j := 0; j < docCount; j++ { + docName := fmt.Sprintf("doc%d", j) + resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) + RequireStatus(t, resp, http.StatusCreated) - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartOneshotPull()) + version := DocVersionFromPutResponse(t, resp) + collectionVersions[blipName] = append(collectionVersions[blipName], version) + collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) } + } + require.NoError(t, rt.WaitForPendingChanges()) - for _, collectionClient := range btc.collectionClients { - versions := collectionVersions[collectionClient.collection] - docIDs := collectionDocIDs[collectionClient.collection] - msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - 
} + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartOneshotPull()) + } + + for _, collectionClient := range btc.collectionClients { + versions := collectionVersions[collectionClient.collection] + docIDs := collectionDocIDs[collectionClient.collection] + msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } - }) } diff --git a/rest/blip_api_crud_test.go b/rest/blip_api_crud_test.go index e0b67f1aad..7c041f7cb5 100644 --- a/rest/blip_api_crud_test.go +++ b/rest/blip_api_crud_test.go @@ -1152,8 +1152,53 @@ function(doc, oldDoc) { // Test send and retrieval of a doc. 
// -// err := btc.StartPull() -// require.NoError(t, err) +// Validate deleted handling (includes check for https://github.com/couchbase/sync_gateway/issues/3341) +func TestBlipSendAndGetRev(t *testing.T) { + + base.SetUpTestLogging(t, base.LevelInfo, base.KeyHTTP, base.KeySync, base.KeySyncMsg) + + rt := NewRestTester(t, nil) + defer rt.Close() + btSpec := BlipTesterSpec{ + connectingUsername: "user1", + connectingPassword: "1234", + } + bt, err := NewBlipTesterFromSpecWithRT(t, &btSpec, rt) + require.NoError(t, err, "Unexpected error creating BlipTester") + defer bt.Close() + + // Send non-deleted rev + sent, _, resp, err := bt.SendRev("sendAndGetRev", "1-abc", []byte(`{"key": "val", "channels": ["user1"]}`), blip.Properties{}) + assert.True(t, sent) + assert.NoError(t, err) + assert.Equal(t, "", resp.Properties["Error-Code"]) + + // Get non-deleted rev + response := bt.restTester.SendAdminRequest("GET", "/{{.keyspace}}/sendAndGetRev?rev=1-abc", "") + RequireStatus(t, response, 200) + var responseBody RestDocument + assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &responseBody), "Error unmarshalling GET doc response") + _, ok := responseBody[db.BodyDeleted] + assert.False(t, ok) + + // Tombstone the document + history := []string{"1-abc"} + sent, _, resp, err = bt.SendRevWithHistory("sendAndGetRev", "2-bcd", history, []byte(`{"key": "val", "channels": ["user1"]}`), blip.Properties{"deleted": "true"}) + assert.True(t, sent) + assert.NoError(t, err) + assert.Equal(t, "", resp.Properties["Error-Code"]) + + // Get the tombstoned document + response = bt.restTester.SendAdminRequest("GET", "/{{.keyspace}}/sendAndGetRev?rev=2-bcd", "") + RequireStatus(t, response, 200) + responseBody = RestDocument{} + assert.NoError(t, base.JSONUnmarshal(response.Body.Bytes(), &responseBody), "Error unmarshalling GET doc response") + deletedValue, deletedOK := responseBody[db.BodyDeleted].(bool) + assert.True(t, deletedOK) + assert.True(t, deletedValue) +} + +// Test send 
and retrieval of a doc with a large numeric value. Ensure proper large number handling. // // Validate deleted handling (includes check for https://github.com/couchbase/sync_gateway/issues/3341) func TestBlipSendAndGetLargeNumberRev(t *testing.T) { @@ -1729,11 +1774,381 @@ func TestGetRemovedDoc(t *testing.T) { // Reproduce issue SG #3738 // -// btc.Run(func(t *testing.T) { -// // Confirm no error message or panic is returned in response -// response, err := btc.UnsubPullChanges() -// assert.NoError(t, err) -// assert.Empty(t, response) +// - Add 5 docs to channel ABC +// - Purge one doc via _purge REST API +// - Flush rev cache +// - Send subChanges request +// - Reply to all changes saying all docs are wanted +// - Wait to receive rev messages for all 5 docs +// - Expected: receive all 5 docs (4 revs and 1 norev) +// - Actual: only receive 4 docs (4 revs) +func TestMissingNoRev(t *testing.T) { + rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) + defer rt.Close() + ctx := rt.Context() + + bt, err := NewBlipTesterFromSpecWithRT(t, nil, rt) + require.NoError(t, err, "Unexpected error creating BlipTester") + defer bt.Close() + + require.NoError(t, rt.WaitForDBOnline()) + + // Create 5 docs + for i := 0; i < 5; i++ { + docID := fmt.Sprintf("doc-%d", i) + docRev := fmt.Sprintf("1-abc%d", i) + sent, _, resp, err := bt.SendRev(docID, docRev, []byte(`{"key": "val", "channels": ["ABC"]}`), blip.Properties{}) + assert.True(t, sent) + require.NoError(t, err, "resp is %s", resp) + } + + // Pull docs, expect to pull 5 docs since none of them has purged yet. + docs, ok := bt.WaitForNumDocsViaChanges(5) + require.True(t, ok) + assert.Len(t, docs, 5) + + // Purge one doc + doc0Id := fmt.Sprintf("doc-%d", 0) + err = rt.GetSingleTestDatabaseCollectionWithUser().Purge(ctx, doc0Id) + assert.NoError(t, err, "failed") + + // Flush rev cache + rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + + // Pull docs, expect to pull 4 since one was purged. 
(also expect to NOT get stuck) + docs, ok = bt.WaitForNumDocsViaChanges(4) + assert.True(t, ok) + assert.Len(t, docs, 4) + +} + +// TestBlipPullRevMessageHistory tests that a simple pull replication contains history in the rev message. +func TestBlipPullRevMessageHistory(t *testing.T) { + + base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) + + sgUseDeltas := base.IsEnterpriseEdition() + rtConfig := RestTesterConfig{ + DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ + DeltaSync: &DeltaSyncConfig{ + Enabled: &sgUseDeltas, + }, + }}, + GuestEnabled: true, + } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() + client.ClientDeltas = true + + err = client.StartPull() + assert.NoError(t, err) + + const docID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := client.WaitForVersion(docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = client.WaitForVersion(docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version +} + +// Reproduces CBG-617 (a client using activeOnly for the initial replication, and then still expecting to get subsequent tombstones afterwards) +func TestActiveOnlyContinuous(t *testing.T) { + + base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + + rt := NewRestTester(t, 
&RestTesterConfig{GuestEnabled: true}) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + + const docID = "doc1" + version := rt.PutDoc(docID, `{"test":true}`) + + // start an initial pull + require.NoError(t, btc.StartPullSince("true", "0", "true")) + rev, found := btc.WaitForVersion(docID, version) + assert.True(t, found) + assert.Equal(t, `{"test":true}`, string(rev)) + + // delete the doc and make sure the client still gets the tombstone replicated + deletedVersion := rt.DeleteDocReturnVersion(docID, version) + + rev, found = btc.WaitForVersion(docID, deletedVersion) + assert.True(t, found) + assert.Equal(t, `{}`, string(rev)) +} + +// Test that exercises Sync Gateway's norev handler +func TestBlipNorev(t *testing.T) { + + base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + + rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + + norevMsg := db.NewNoRevMessage() + norevMsg.SetId("docid") + norevMsg.SetRev("1-a") + norevMsg.SetSequence(db.SequenceID{Seq: 50}) + norevMsg.SetError("404") + norevMsg.SetReason("couldn't send xyz") + btc.addCollectionProperty(norevMsg.Message) + + // Couchbase Lite always sends noreply=true for norev messages + // but set to false so we can block waiting for a reply + norevMsg.SetNoReply(false) + + // Request that the handler used to process the message is sent back in the response + norevMsg.Properties[db.SGShowHandler] = "true" + + assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) + + // Check that the response we got back was processed by the norev handler + resp := norevMsg.Response() + assert.NotNil(t, resp) + assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) +} + +// TestNoRevSetSeq makes sure the correct string is used with the corresponding function +func TestNoRevSetSeq(t *testing.T) { + 
norevMsg := db.NewNoRevMessage() + assert.Equal(t, "", norevMsg.Properties[db.NorevMessageSeq]) + assert.Equal(t, "", norevMsg.Properties[db.NorevMessageSequence]) + + norevMsg.SetSequence(db.SequenceID{Seq: 50}) + assert.Equal(t, "50", norevMsg.Properties[db.NorevMessageSequence]) + + norevMsg.SetSeq(db.SequenceID{Seq: 60}) + assert.Equal(t, "60", norevMsg.Properties[db.NorevMessageSeq]) + +} + +func TestRemovedMessageWithAlternateAccess(t *testing.T) { + defer db.SuspendSequenceBatching()() + base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + + rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() + + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + }) + require.NoError(t, err) + defer btc.Close() + + const docID = "doc" + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + + version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, 
changes.Results[0].Changes[0]) + + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok = btc.WaitForVersion(docID, version) + assert.True(t, ok) + + version = rt.UpdateDoc(docID, version, `{"channels": []}`) + const docMarker = "docmarker" + docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) + + changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) + require.NoError(t, err) + assert.Len(t, changes.Results, 2) + assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) + assert.False(t, changes.Results[0].Revoked) + assert.Equal(t, "docmarker", changes.Results[1].ID) + RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) + assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) + assert.False(t, changes.Results[1].Revoked) + + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok = btc.WaitForVersion(docMarker, docMarkerVersion) + assert.True(t, ok) + + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } + } + } + + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 3) + require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A + require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B + 
require.Len(t, messageBody[2], 3) + + deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + id := messageBody[0].([]interface{})[1] + require.NoError(t, err) + assert.Equal(t, "doc", id) + assert.Equal(t, int64(4), deletedFlags) +} + +// TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication tests the following scenario: +// User has access to channel A and B +// Document rev 1 is in A and B +// Document rev 2 is in channel C +// Document rev 3 is in channel B +// User issues changes requests with since=0 for channel A +// Revocation should not be issued because the user currently has access to channel B, even though they didn't +// have access to the removal revision (rev 2). CBG-2277 + +func TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication(t *testing.T) { + defer db.SuspendSequenceBatching()() + base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + + rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() + + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + }) + require.NoError(t, err) + defer btc.Close() + + const ( + docID = "doc" + ) + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + + err = btc.StartOneshotPull() + assert.NoError(t, err) + _, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + + version 
= rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) + require.NoError(t, rt.WaitForPendingChanges()) + // At this point changes should send revocation, as document isn't in any of the user's channels + changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + + err = btc.StartOneshotPullFiltered("A") + assert.NoError(t, err) + _, ok = btc.WaitForVersion(docID, version) + assert.True(t, ok) + + _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + markerID := "docmarker" + markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) + require.NoError(t, rt.WaitForPendingChanges()) + + // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received + changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API + assert.Equal(t, "doc", changes.Results[0].ID) + assert.Equal(t, markerID, changes.Results[1].ID) + + err = btc.StartOneshotPullFiltered("A") + assert.NoError(t, err) + _, ok = btc.WaitForVersion(markerID, markerVersion) + assert.True(t, ok) + + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = 
message + } + } + } + + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 1) + require.Len(t, messageBody[0], 3) // marker doc + require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) +} + +// Make sure that a client cannot open multiple subChanges subscriptions on a single blip context (SG #3222) +// - Open a one-off subChanges request, ensure it works. +// - Open a subsequent continuous request, and ensure it works. +// - Open another continuous subChanges, and asserts that it gets an error on the 2nd one, because the first is still running. +// - Open another one-off subChanges request, assert we still get an error. // // Asserts on stats to test for regression of CBG-1824: Make sure SubChangesOneShotActive gets decremented when one shot // sub changes request has completed @@ -1947,57 +2362,53 @@ func TestBlipInternalPropertiesHandling(t *testing.T) { } // Setup - rtConfig := &RestTesterConfig{ - GuestEnabled: true, - } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + }) + defer rt.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() - // Track last sequence for next changes feed - var changes ChangesResults - changes.Last_Seq = "0" + // Track last sequence for next changes feed + var changes ChangesResults + changes.Last_Seq = "0" - for i, test := range testCases { - t.Run(test.name, func(t *testing.T) { - docID := fmt.Sprintf("test%d", i) - rawBody, err := json.Marshal(test.inputBody) - require.NoError(t, err) + for i, test := range testCases { + 
t.Run(test.name, func(t *testing.T) { + docID := fmt.Sprintf("test%d", i) + rawBody, err := json.Marshal(test.inputBody) + require.NoError(t, err) - _, err = btcRunner.PushRev(client.id, docID, EmptyDocVersion(), rawBody) + _, err = client.PushRev(docID, EmptyDocVersion(), rawBody) - if test.expectReject { - assert.Error(t, err) - return - } - assert.NoError(t, err) - - // Wait for rev to be received on RT - err = client.rt.WaitForPendingChanges() - require.NoError(t, err) - changes, err = client.rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) - require.NoError(t, err) - - var bucketDoc map[string]interface{} - _, err = client.rt.GetSingleDataStore().Get(docID, &bucketDoc) - assert.NoError(t, err) - body := client.rt.GetDocBody(docID) - // Confirm input body is in the bucket doc - if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { - for k, v := range test.inputBody { - assert.Equal(t, v, bucketDoc[k]) - assert.Equal(t, v, body[k]) - } + if test.expectReject { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + // Wait for rev to be received on RT + err = rt.WaitForPendingChanges() + require.NoError(t, err) + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) + require.NoError(t, err) + + var bucketDoc map[string]interface{} + _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) + assert.NoError(t, err) + body := rt.GetDocBody(docID) + // Confirm input body is in the bucket doc + if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { + for k, v := range test.inputBody { + assert.Equal(t, v, bucketDoc[k]) + assert.Equal(t, v, body[k]) } - }) - } - }) + } + }) + } } // CBG-2053: Test that the handleRev stats still increment correctly when going through the processRev function with @@ -2117,144 +2528,133 @@ func TestSendRevisionNoRevHandling(t *testing.T) { if 
!base.UnitTestUrlIsWalrus() { t.Skip("Skip LeakyBucket test when running in integration") } - - rtConfig := &RestTesterConfig{ - GuestEnabled: true, - CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), + testCases := []struct { + error error + expectNoRev bool + }{ + { + error: gocb.ErrDocumentNotFound, + expectNoRev: true, + }, + { + error: gocb.ErrOverload, + expectNoRev: false, + }, } - btcRunner := NewBlipTesterClientRunner(t) - btcRunner.SkipVersionVectorInitialization = true // test is for norev handling, this will be different in version vector subprotocol - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - leakyDataStore, ok := base.AsLeakyDataStore(btc.rt.Bucket().DefaultDataStore()) - require.True(t, ok) - - testCases := []struct { - error error - expectNoRev bool - }{ - { - error: gocb.ErrDocumentNotFound, - expectNoRev: true, - }, - { - error: gocb.ErrOverload, - expectNoRev: false, - }, - } - for _, test := range testCases { - t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { - docName := fmt.Sprintf("%s", test.error) - - // Change noRev handler so it's known when a noRev is received - recievedNoRevs := make(chan *blip.Message) - btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { - fmt.Println("Received noRev", msg.Properties) - recievedNoRevs <- msg - } + for _, test := range testCases { + t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { + docName := fmt.Sprintf("%s", test.error) + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), + }) + defer rt.Close() - version := btc.rt.PutDoc(docName, `{"foo":"bar"}`) + leakyDataStore, 
ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) + require.True(t, ok) - // Make the LeakyBucket return an error - leakyDataStore.SetGetRawCallback(func(key string) error { - return test.error - }) - leakyDataStore.SetGetWithXattrCallback(func(key string) error { - return test.error - }) + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - // Flush cache so document has to be retrieved from the leaky bucket - btc.rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() - - err := btcRunner.StartPull(btc.id) - require.NoError(t, err) - - // Wait 3 seconds for noRev to be received - select { - case msg := <-recievedNoRevs: - if test.expectNoRev { - assert.Equal(t, docName, msg.Properties["id"]) - } else { - require.Fail(t, "Received unexpected noRev message", msg) - } - case <-time.After(3 * time.Second): - if test.expectNoRev { - require.Fail(t, "Didn't receive expected noRev") - } - } + // Change noRev handler so it's known when a noRev is received + recievedNoRevs := make(chan *blip.Message) + btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { + fmt.Println("Received noRev", msg.Properties) + recievedNoRevs <- msg + } + + version := rt.PutDoc(docName, `{"foo":"bar"}`) - // Make sure document did not get replicated - _, found := btcRunner.GetVersion(btc.id, docName, version) - assert.False(t, found) + // Make the LeakyBucket return an error + leakyDataStore.SetGetRawCallback(func(key string) error { + return test.error }) - } - }) + leakyDataStore.SetGetWithXattrCallback(func(key string) error { + return test.error + }) + + // Flush cache so document has to be retrieved from the leaky bucket + rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + + err = btc.StartPull() + require.NoError(t, err) + + // Wait 3 seconds for noRev to be received + select { + case msg := <-recievedNoRevs: + if test.expectNoRev { + assert.Equal(t, docName, 
msg.Properties["id"]) + } else { + require.Fail(t, "Received unexpected noRev message", msg) + } + case <-time.After(3 * time.Second): + if test.expectNoRev { + require.Fail(t, "Didn't receive expected noRev") + } + } + + // Make sure document did not get replicated + _, found := btc.GetVersion(docName, version) + assert.False(t, found) + }) + } } func TestUnsubChanges(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - rtConfig := &RestTesterConfig{GuestEnabled: true} + rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) + + defer rt.Close() + + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() + // Confirm no error message or panic is returned in response + response, err := btc.UnsubPullChanges() + assert.NoError(t, err) + assert.Empty(t, response) + + // Sub changes + err = btc.StartPull() + require.NoError(t, err) const ( doc1ID = "doc1ID" doc2ID = "doc2ID" ) - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - // Confirm no error message or panic is returned in response - response, err := btcRunner.UnsubPullChanges(btc.id) - assert.NoError(t, err) - assert.Empty(t, response) + doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) + _, found := btc.WaitForVersion(doc1ID, doc1Version) + require.True(t, found) - // Sub changes - err = btcRunner.StartPull(btc.id) - require.NoError(t, err) - doc1Version := btc.rt.PutDoc(doc1ID, `{"key":"val1"}`) - _, found := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) - require.True(t, found) - - activeReplStat := btc.rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous - require.EqualValues(t, 1, activeReplStat.Value()) + activeReplStat := 
rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous + require.EqualValues(t, 1, activeReplStat.Value()) - // Unsub changes - response, err = btcRunner.UnsubPullChanges(btc.id) - assert.NoError(t, err) - assert.Empty(t, response) - // Wait for unsub changes to stop the sub changes being sent before sending document up - base.RequireWaitForStat(t, activeReplStat.Value, 0) - - // Confirm no more changes are being sent - doc2Version := btc.rt.PutDoc(doc2ID, `{"key":"val1"}`) - err = btc.rt.WaitForConditionWithOptions(func() bool { - _, found = btcRunner.GetVersion(btc.id, "doc2", doc2Version) - return found - }, 10, 100) - assert.Error(t, err) - - // Confirm no error message is still returned when no subchanges active - response, err = btcRunner.UnsubPullChanges(btc.id) - assert.NoError(t, err) - assert.Empty(t, response) + // Unsub changes + response, err = btc.UnsubPullChanges() + assert.NoError(t, err) + assert.Empty(t, response) + // Wait for unsub changes to stop the sub changes being sent before sending document up + base.RequireWaitForStat(t, activeReplStat.Value, 0) + + // Confirm no more changes are being sent + doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) + err = rt.WaitForConditionWithOptions(func() bool { + _, found = btc.GetVersion("doc2", doc2Version) + return found + }, 10, 100) + assert.Error(t, err) + + // Confirm no error message is still returned when no subchanges active + response, err = btc.UnsubPullChanges() + assert.NoError(t, err) + assert.Empty(t, response) - // Confirm the pull replication can be restarted and it syncs doc2 - err = btcRunner.StartPull(btc.id) - require.NoError(t, err) - _, found = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) - assert.True(t, found) - }) + // Confirm the pull replication can be restarted and it syncs doc2 + err = btc.StartPull() + require.NoError(t, err) + _, found = btc.WaitForVersion(doc2ID, doc2Version) + assert.True(t, found) } // TestRequestPlusPull tests that a 
one-shot pull replication waits for pending changes when request plus is set on the replication. @@ -2271,48 +2671,47 @@ func TestRequestPlusPull(t *testing.T) { } }`, } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - // Initialize blip tester client (will create user) - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "bernard", - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer client.Close() - database := client.rt.GetDatabase() - // Put a doc in channel PBS - response := client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Initialize blip tester client (will create user) + client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "bernard", + }) + require.NoError(t, err) + defer client.Close() - // Write a document granting user 'bernard' access to PBS - response = client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - // Start a regular one-shot pull - err := 
btcRunner.StartOneshotPullRequestPlus(client.id) - assert.NoError(t, err) + // Write a document granting user 'bernard' access to PBS + response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Start a regular one-shot pull + err = client.StartOneshotPullRequestPlus() + assert.NoError(t, err) + + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := client.WaitForDoc("pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) - }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the db config. 
@@ -2334,48 +2733,47 @@ func TestRequestPlusPullDbConfig(t *testing.T) { }, }, } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - // Initialize blip tester client (will create user) - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "bernard", - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer client.Close() - database := client.rt.GetDatabase() - // Put a doc in channel PBS - response := client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() + + // Initialize blip tester client (will create user) + client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "bernard", + }) + require.NoError(t, err) + defer client.Close() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Write a document granting user 'bernard' access to PBS - response = client.rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Write a document granting user 'bernard' access to PBS + response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", 
"accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Start a regular one-shot pull - err := btcRunner.StartOneshotPull(client.id) - assert.NoError(t, err) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err = client.StartOneshotPull() + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := client.WaitForDoc("pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) - }) } // TestBlipRefreshUser makes sure there is no panic if a user gets deleted during a replication @@ -2396,55 +2794,53 @@ func TestBlipRefreshUser(t *testing.T) { rtConfig := RestTesterConfig{ SyncFn: channels.DocChannelsSyncFunction, } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + const username = "bernard" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - // Initialize blip tester client (will create user) - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, 
&BlipTesterClientOpts{ // This test will need refactoring when its getting fixed in CBG-3512 - Username: username, - Channels: []string{"chan1"}, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer btc.Close() + // Initialize blip tester client (will create user) + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "bernard", + Channels: []string{"chan1"}, + }) - // add chan1 explicitly - response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) - RequireStatus(t, response, http.StatusOK) + require.NoError(t, err) + defer btc.Close() - const docID = "doc1" - version := rt.PutDoc(docID, `{"channels":["chan1"]}`) + // add chan1 explicitly + response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) + RequireStatus(t, response, http.StatusOK) - // Start a regular one-shot pull - err := btcRunner.StartPullSince(btc.id, "true", "0", "false") - require.NoError(t, err) + const docID = "doc1" + version := rt.PutDoc(docID, `{"channels":["chan1"]}`) - _, ok := btcRunner.WaitForDoc(btc.id, docID) - require.True(t, ok) + // Start a regular one-shot pull + err = btc.StartPullSince("true", "0", "false") + require.NoError(t, err) - _, ok = btcRunner.GetVersion(btc.id, docID, version) - require.True(t, ok) + _, ok := btc.WaitForDoc(docID) + require.True(t, ok) - // delete user with an active blip connection - response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") - RequireStatus(t, response, http.StatusOK) + _, ok = btc.GetVersion(docID, version) + require.True(t, ok) - require.NoError(t, rt.WaitForPendingChanges()) + // delete user with an active blip connection + response = rt.SendAdminRequest(http.MethodDelete, 
"/{{.db}}/_user/"+username, "") + RequireStatus(t, response, http.StatusOK) - // further requests will 500, but shouldn't panic - unsubChangesRequest := blip.NewRequest() - unsubChangesRequest.SetProfile(db.MessageUnsubChanges) - btc.addCollectionProperty(unsubChangesRequest) + require.NoError(t, rt.WaitForPendingChanges()) - err = btc.pullReplication.sendMsg(unsubChangesRequest) - require.NoError(t, err) + // further requests will 500, but shouldn't panic + unsubChangesRequest := blip.NewRequest() + unsubChangesRequest.SetProfile(db.MessageUnsubChanges) + btc.addCollectionProperty(unsubChangesRequest) - testResponse := unsubChangesRequest.Response() - require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) - body, err := testResponse.Body() - require.NoError(t, err) - require.NotContains(t, string(body), "Panic:") - }) + err = btc.pullReplication.sendMsg(unsubChangesRequest) + require.NoError(t, err) + + testResponse := unsubChangesRequest.Response() + require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) + body, err := testResponse.Body() + require.NoError(t, err) + require.NotContains(t, string(body), "Panic:") } diff --git a/rest/blip_api_delta_sync_test.go b/rest/blip_api_delta_sync_test.go index 93f0a1992f..74651c909e 100644 --- a/rest/blip_api_delta_sync_test.go +++ b/rest/blip_api_delta_sync_test.go @@ -33,64 +33,57 @@ func TestBlipDeltaSyncPushAttachment(t *testing.T) { const docID = "pushAttachmentDoc" - rtConfig := &RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - DeltaSync: &DeltaSyncConfig{ - Enabled: base.BoolPtr(true), - }, - }}, - GuestEnabled: true, - } - - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + rt := NewRestTester(t, + &RestTesterConfig{ + DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ + DeltaSync: 
&DeltaSyncConfig{ + Enabled: base.BoolPtr(true), + }, + }}, + GuestEnabled: true, + }) + defer rt.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - // Push first rev - version, err := btcRunner.PushRev(btc.id, docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) - require.NoError(t, err) + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - // Push second rev with an attachment (no delta yet) - attData := base64.StdEncoding.EncodeToString([]byte("attach")) + // Push first rev + version, err := btc.PushRev(docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) + require.NoError(t, err) - version, err = btcRunner.PushRev(btc.id, docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) - require.NoError(t, err) + // Push second rev with an attachment (no delta yet) + attData := base64.StdEncoding.EncodeToString([]byte("attach")) - syncData, err := btc.rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + version, err = btc.PushRev(docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found := syncData.Attachments["myAttachment"] - assert.True(t, found) + syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - // Turn deltas on - btc.ClientDeltas = true + assert.Len(t, syncData.Attachments, 1) + _, found := syncData.Attachments["myAttachment"] + assert.True(t, found) - // Get existing body with the stub attachment, insert a new property and push as delta. 
- body, found := btcRunner.GetVersion(btc.id, docID, version) - require.True(t, found) + // Turn deltas on + btc.ClientDeltas = true - newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) - require.NoError(t, err) + // Get existing body with the stub attachment, insert a new property and push as delta. + body, found := btc.GetVersion(docID, version) + require.True(t, found) - _, err = btcRunner.PushRev(btc.id, docID, version, newBody) - require.NoError(t, err) + newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) + require.NoError(t, err) - syncData, err = btc.rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + _, err = btc.PushRev(docID, version, newBody) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found = syncData.Attachments["myAttachment"] - assert.True(t, found) + syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - // set client deltas back to false for next run - btc.ClientDeltas = false - }) + assert.Len(t, syncData.Attachments, 1) + _, found = syncData.Attachments["myAttachment"] + assert.True(t, found) } // Test pushing and pulling new attachments through delta sync @@ -113,63 +106,59 @@ func TestBlipDeltaSyncPushPullNewAttachment(t *testing.T) { }}, GuestEnabled: true, } - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer btc.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer btc.Close() - - btc.ClientDeltas = 
true - err := btcRunner.StartPull(btc.id) - assert.NoError(t, err) + btc.ClientDeltas = true + err = btc.StartPull() + assert.NoError(t, err) + const docID = "doc1" - // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := btc.rt.PutDoc(docID, bodyText) - data, ok := btcRunner.WaitForVersion(btc.id, docID, version) - assert.True(t, ok) - - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client by adding another attachment. - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` - version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := btc.rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 2) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - world, ok := attachments["world.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, 
"sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) - assert.Equal(t, float64(11), world["length"]) - assert.Equal(t, float64(2), world["revpos"]) - assert.Equal(t, true, world["stub"]) - }) + // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) + data, ok := btc.WaitForVersion(docID, version) + assert.True(t, ok) + + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client by adding another attachment. + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` + version, err = btc.PushRev(docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 2) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + world, ok := attachments["world.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, 
"sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) + assert.Equal(t, float64(11), world["length"]) + assert.Equal(t, float64(2), world["revpos"]) + assert.Equal(t, true, world["stub"]) } // TestBlipDeltaSyncNewAttachmentPull tests that adding a new attachment in SG and replicated via delta sync adds the attachment @@ -186,87 +175,84 @@ func TestBlipDeltaSyncNewAttachmentPull(t *testing.T) { }}, GuestEnabled: true, } - btcRunner := NewBlipTesterClientRunner(t) - const doc1ID = "doc1" + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() - client.ClientDeltas = true - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) + client.ClientDeltas = true + err = client.StartPull() + assert.NoError(t, err) - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := client.rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, doc1ID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment - version = client.rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) - - data, ok = btcRunner.WaitForVersion(client.id, doc1ID, version) - assert.True(t, ok) - var dataMap map[string]interface{} - assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) - atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, atts, 1) - 
hello, ok := atts["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // message #3 is the getAttachment message that is sent in-between rev processing - msg, ok := client.pullReplication.WaitForMessage(3) - assert.True(t, ok) - assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") - - // Check EE is delta, and CE is full-body replication - // msg, ok = client.pullReplication.WaitForMessage(5) - msg, ok = btcRunner.WaitForBlipRevMessage(client.id, doc1ID, version) - assert.True(t, ok) - - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) - assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) - } + const doc1ID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + 
+ data, ok := client.WaitForVersion(doc1ID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment + version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) + + data, ok = client.WaitForVersion(doc1ID, version) + assert.True(t, ok) + var dataMap map[string]interface{} + assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) + atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, atts, 1) + hello, ok := atts["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // message #3 is the getAttachment message that is sent in-between rev processing + msg, ok := client.pullReplication.WaitForMessage(3) + assert.True(t, ok) + assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") + + // Check EE is delta, and CE is full-body replication + // msg, ok = client.pullReplication.WaitForMessage(5) + msg, ok = client.WaitForBlipRevMessage(doc1ID, version) + assert.True(t, ok) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", 
msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) + assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) + } - respBody := client.rt.GetDocVersion(doc1ID, version) - assert.Equal(t, doc1ID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 2) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - atts = respBody[db.BodyAttachments].(map[string]interface{}) - assert.Len(t, atts, 1) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) - }) + respBody := rt.GetDocVersion(doc1ID, version) + assert.Equal(t, doc1ID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 2) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + atts = respBody[db.BodyAttachments].(map[string]interface{}) + assert.Len(t, atts, 1) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), 
hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) } // TestBlipDeltaSyncPull tests that a simple pull replication uses deltas in EE, @@ -276,7 +262,7 @@ func TestBlipDeltaSyncPull(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -284,69 +270,66 @@ func TestBlipDeltaSyncPull(t *testing.T) { }}, GuestEnabled: true, } + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() var deltaSentCount int64 - btcRunner := NewBlipTesterClientRunner(t) + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() + + client.ClientDeltas = true + err = client.StartPull() + assert.NoError(t, err) + const docID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + data, ok := client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version = rt.UpdateDoc(docID, version, `{"greetings": 
[{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaSentCount = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + data, ok = client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + msg, ok := client.WaitForBlipRevMessage(docID, version) + assert.True(t, ok) - client.ClientDeltas = true - err := btcRunner.StartPull(client.id) + // Check EE is delta, and CE is full-body replication + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version = client.rt.UpdateDoc(docID, version, `{"greetings": [{"hello": 
"world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version) - assert.True(t, ok) - - // Check EE is delta, and CE is full-body replication - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - - var afterDeltaSyncCount int64 - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - afterDeltaSyncCount = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - - assert.Equal(t, deltaSentCount, afterDeltaSyncCount) + var afterDeltaSyncCount int64 + if rt.GetDatabase().DbStats.DeltaSync() != nil { + afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() } - }) + + assert.Equal(t, deltaSentCount, afterDeltaSyncCount) + } } // TestBlipDeltaSyncPullResend tests that a simple pull replication that uses a delta a client rejects will resend the revision in full. 
@@ -358,7 +341,7 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: base.BoolPtr(true), @@ -366,60 +349,58 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { }}, GuestEnabled: true, } - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() - // create doc1 rev 1 - docVersion1 := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - deltaSentCount := client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - - // reject deltas built ontop of rev 1 - client.rejectDeltasForSrcRev = docVersion1.RevID - - client.ClientDeltas = true - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) - data, ok := btcRunner.WaitForVersion(client.id, docID, docVersion1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2 - docVersion2 := client.rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = btcRunner.WaitForVersion(client.id, docID, docVersion2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - - // Check the request was initially sent with the correct deltaSrc property - assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, 
err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - - msg, ok = btcRunner.WaitForBlipRevMessage(client.id, docID, docVersion2) - assert.True(t, ok) - - // Check the resent request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err = msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - }) + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + + docID := "doc1" + // create doc1 rev 1 + docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() + + // reject deltas built ontop of rev 1 + client.rejectDeltasForSrcRev = docVersion1.RevID + + client.ClientDeltas = true + err = client.StartPull() + assert.NoError(t, err) + data, ok := client.WaitForVersion(docID, docVersion1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2 + docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = client.WaitForVersion(docID, docVersion2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + + // Check the request was initially sent with the correct 
deltaSrc property + assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + + msg, ok = client.WaitForBlipRevMessage(docID, docVersion2) + assert.True(t, ok) + + // Check the resent request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err = msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) } // TestBlipDeltaSyncPullRemoved tests a simple pull replication that drops a document out of the user's channel. 
@@ -428,7 +409,7 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -438,45 +419,43 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + + client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + require.NoError(t, err) + defer client.Close() - btcRunner := NewBlipTesterClientRunner(t) - const docID = "doc1" - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - defer client.Close() - - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) - - // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 - version := client.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 - version = client.rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) - - data, ok = btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{"_removed":true}`, string(data)) + err = 
client.StartPull() + assert.NoError(t, err) - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_removed":true}`, string(msgBody)) - }) + const docID = "doc1" + // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + + data, ok := client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 + version = rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) + + data, ok = client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{"_removed":true}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_removed":true}`, string(msgBody)) } // TestBlipDeltaSyncPullTombstoned tests a simple pull replication that deletes a document. 
@@ -494,7 +473,7 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -504,79 +483,78 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer client.Close() - - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) - - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := client.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - data, ok := btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = 
rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = client.rt.DeleteDocReturnVersion(docID, version) + client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + }) + require.NoError(t, err) + defer client.Close() - data, ok = btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) + err = client.StartPull() + assert.NoError(t, err) - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{}`, string(msgBody)) - assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) - - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 - - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + const docID = "doc1" + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + data, ok := client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 
2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{}`, string(msgBody)) + assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } - }) + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, 
deltasSentStart, deltasSentEnd) + } } // TestBlipDeltaSyncPullTombstonedStarChan tests two clients can perform a simple pull replication that deletes a document when the user has access to the star channel. @@ -598,133 +576,129 @@ func TestBlipDeltaSyncPullTombstonedStarChan(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyHTTP, base.KeyCache, base.KeySync, base.KeySyncMsg) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 - btcRunner := NewBlipTesterClientRunner(t) - const docID = "doc1" - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "client1", - Channels: []string{"*"}, - ClientDeltas: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer client1.Close() - client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "client2", - Channels: []string{"*"}, - ClientDeltas: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer client2.Close() - - if client1.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - - err := btcRunner.StartPull(client1.id) - require.NoError(t, err) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = 
rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + client1, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "client1", + Channels: []string{"*"}, + ClientDeltas: true, + }) + require.NoError(t, err) + defer client1.Close() - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := client1.rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + client2, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "client2", + Channels: []string{"*"}, + ClientDeltas: true, + }) + require.NoError(t, err) + defer client2.Close() - data, ok := btcRunner.WaitForVersion(client1.id, docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + err = client1.StartPull() + require.NoError(t, err) - // Have client2 get only rev-1 and then stop replicating - err = btcRunner.StartOneshotPull(client2.id) - assert.NoError(t, err) - data, ok = btcRunner.WaitForVersion(client2.id, docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = client1.rt.DeleteDocReturnVersion(docID, version) - - data, ok = btcRunner.WaitForVersion(client1.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok := btcRunner.WaitForBlipRevMessage(client1.id, docID, version) // docid, revid to get the message - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - 
msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - msgBody, err := msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } + const docID = "doc1" + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + + data, ok := client1.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // Have client2 get only rev-1 and then stop replicating + err = client2.StartOneshotPull() + assert.NoError(t, err) + data, ok = client2.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = client1.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok := client1.WaitForBlipRevMessage(docID, version) // docid, revid to get the message + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + msgBody, err := msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected 
deleted property for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } - // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache - err = btcRunner.StartOneshotPull(client2.id) - assert.NoError(t, err) - data, ok = btcRunner.WaitForVersion(client2.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok = btcRunner.WaitForBlipRevMessage(client2.id, docID, version) - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - msgBody, err = msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } + // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache + err = client2.StartOneshotPull() + assert.NoError(t, err) + data, ok = client2.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok = client2.WaitForBlipRevMessage(docID, version) + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + msgBody, err = msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), 
client2.pullReplication.GetMessages()) + } - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 - if client1.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = client1.rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) - assert.Equal(t, deltasSentStart+2, deltasSentEnd) - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } - }) + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) + assert.Equal(t, deltasSentStart+2, deltasSentEnd) + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, 
deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } } // TestBlipDeltaSyncPullRevCache tests that a simple pull replication uses deltas in EE, @@ -746,78 +720,79 @@ func TestBlipDeltaSyncPullRevCache(t *testing.T) { }}, GuestEnabled: true, } - const docID = "doc1" - btcRunner := NewBlipTesterClientRunner(t) + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() - client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client2.Close() - client.ClientDeltas = true + client.ClientDeltas = true + err = client.StartPull() + assert.NoError(t, err) - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) - - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // Perform a one-shot pull as client 2 to pull down the first revision - client2.ClientDeltas = true - err = btcRunner.StartOneshotPull(client2.id) - assert.NoError(t, err) - data, ok = btcRunner.WaitForVersion(client2.id, docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := client.rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) - - data, ok = btcRunner.WaitForVersion(client.id, docID, version2) 
- assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) - msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version2) - assert.True(t, ok) - - // Check EE is delta - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - - deltaCacheHits := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMisses := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version - client2.ClientDeltas = true - err = btcRunner.StartOneshotPull(client2.id) - assert.NoError(t, err) - msg2, ok := btcRunner.WaitForBlipRevMessage(client2.id, docID, version2) - assert.True(t, ok) - - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody2, err := msg2.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) - - updatedDeltaCacheHits := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - updatedDeltaCacheMisses := client.rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + const docID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := client.WaitForVersion(docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // Perform a one-shot pull as client 2 to pull down the 
first revision + + client2, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client2.Close() + + client2.ClientDeltas = true + err = client2.StartOneshotPull() + assert.NoError(t, err) + data, ok = client2.WaitForVersion(docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) + + data, ok = client.WaitForVersion(docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) + msg, ok := client.WaitForBlipRevMessage(docID, version2) + assert.True(t, ok) + + // Check EE is delta + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version + client2.ClientDeltas = true + err = client2.StartOneshotPull() + assert.NoError(t, err) + msg2, ok := client2.WaitForBlipRevMessage(docID, version2) + assert.True(t, ok) + + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody2, err := msg2.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) + + updatedDeltaCacheHits 
:= rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) + assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) - assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) - assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) - }) } // TestBlipDeltaSyncPush tests that a simple push replication handles deltas in EE, @@ -826,7 +801,7 @@ func TestBlipDeltaSyncPush(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -834,99 +809,96 @@ func TestBlipDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - btcRunner := NewBlipTesterClientRunner(t) + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() + + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() + client.ClientDeltas = true + + err = client.StartPull() + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 const docID = "doc1" + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + data, ok := client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abc on client + newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + assert.NoError(t, err) - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() - client.ClientDeltas = true + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) - collection := client.rt.GetSingleTestDatabaseCollection() - err := btcRunner.StartPull(client.id) + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abc on client - newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + // Validate that generation of a delta didn't mutate the revision body in the revision cache + docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) + assert.NoError(t, cacheErr) + assert.NotContains(t, docRev.BodyBytes, "bob") + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, 
string(msgBody)) + } - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) - - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - - // Validate that generation of a delta didn't mutate the revision body in the revision cache - docRev, cacheErr := client.rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) - assert.NoError(t, cacheErr) - assert.NotContains(t, docRev.BodyBytes, "bob") - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - } + respBody := rt.GetDocVersion(docID, newRev) + assert.Equal(t, "doc1", respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 3) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) - respBody := client.rt.GetDocVersion(docID, newRev) - assert.Equal(t, "doc1", respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 3) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, 
greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) + // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) + deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) - // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) - deletedVersion := client.rt.DeleteDocReturnVersion(docID, newRev) + data, ok = client.WaitForVersion(docID, deletedVersion) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) - data, ok = btcRunner.WaitForVersion(client.id, docID, deletedVersion) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) + var deltaPushDocCountStart int64 - var deltaPushDocCountStart int64 + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountStart = client.rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } + _, err = client.PushRev(docID, deletedVersion, []byte(`{"undelete":true}`)) - _, err = btcRunner.PushRev(client.id, docID, deletedVersion, []byte(`{"undelete":true}`)) - - if base.IsEnterpriseEdition() { - // Now make the client push up a delta that has the parent of the tombstone. - // This is not a valid scenario, and is actively prevented on the CBL side. - assert.Error(t, err) - assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") - } else { - // Pushing a full body revision on top of a tombstone is valid. - // CBL clients should fall back to this. The test client doesn't. - assert.NoError(t, err) - } + if base.IsEnterpriseEdition() { + // Now make the client push up a delta that has the parent of the tombstone. + // This is not a valid scenario, and is actively prevented on the CBL side. + assert.Error(t, err) + assert.Contains(t, err.Error(), "Can't use delta. 
Found tombstone for doc") + } else { + // Pushing a full body revision on top of a tombstone is valid. + // CBL clients should fall back to this. The test client doesn't. + assert.NoError(t, err) + } - var deltaPushDocCountEnd int64 + var deltaPushDocCountEnd int64 - if client.rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountEnd = client.rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } - assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) - }) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } + assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) } // TestBlipNonDeltaSyncPush tests that a client that doesn't support deltas can push to a SG that supports deltas (either CE or EE) @@ -934,7 +906,7 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := &RestTesterConfig{ + rtConfig := RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -942,44 +914,41 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - btcRunner := NewBlipTesterClientRunner(t) - const docID = "doc1" - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() - collection := client.rt.GetSingleTestDatabaseCollection() - client.ClientDeltas = false - err := btcRunner.StartPull(client.id) - assert.NoError(t, err) + 
client.ClientDeltas = false + err = client.StartPull() + assert.NoError(t, err) - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := client.rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := btcRunner.WaitForVersion(client.id, docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abcxyz on client - newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) - - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - - body := client.rt.GetDocVersion("doc1", newRev) - require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) - }) + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + const docID = "doc1" + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := client.WaitForVersion(docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abcxyz on client + newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + assert.NoError(t, err) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + // Check the request 
was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + + body := rt.GetDocVersion("doc1", newRev) + require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) } diff --git a/rest/blip_api_no_race_test.go b/rest/blip_api_no_race_test.go index 286db3905a..f6e35f9cf1 100644 --- a/rest/blip_api_no_race_test.go +++ b/rest/blip_api_no_race_test.go @@ -44,68 +44,65 @@ func TestBlipPusherUpdateDatabase(t *testing.T) { GuestEnabled: true, CustomTestBucket: tb.NoCloseClone(), } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) - defer client.Close() - - var lastPushRevErr atomic.Value - - // Wait for the background updates to finish at the end of the test - shouldCreateDocs := base.NewAtomicBool(true) - wg := sync.WaitGroup{} - wg.Add(1) - defer func() { - shouldCreateDocs.Set(false) - wg.Wait() - }() - - // Start the test client creating and pushing documents in the background - go func() { - for i := 0; shouldCreateDocs.IsTrue(); i++ { - // this will begin to error when the database is reloaded underneath the replication - _, err := btcRunner.PushRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) - if err != nil { - lastPushRevErr.Store(err) - } + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) + require.NoError(t, err) + defer client.Close() + 
+ var lastPushRevErr atomic.Value + + // Wait for the background updates to finish at the end of the test + shouldCreateDocs := base.NewAtomicBool(true) + wg := sync.WaitGroup{} + wg.Add(1) + defer func() { + shouldCreateDocs.Set(false) + wg.Wait() + }() + + // Start the test client creating and pushing documents in the background + go func() { + for i := 0; shouldCreateDocs.IsTrue(); i++ { + // this will begin to error when the database is reloaded underneath the replication + _, err := client.PushRev(fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) + if err != nil { + lastPushRevErr.Store(err) } - _ = rt.WaitForPendingChanges() - wg.Done() - }() - - // and wait for a few to be done before we proceed with updating database config underneath replication - _, err := rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) - require.NoError(t, err) - - // just change the sync function to cause the database to reload - dbConfig := *rt.ServerContext().GetDbConfig("db") - dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) - resp := rt.ReplaceDbConfig("db", dbConfig) - RequireStatus(t, resp, http.StatusCreated) - - // Did we tell the client to close the connection (via HTTP/503)? - // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
- WaitAndAssertCondition(t, func() bool { - lastErr, ok := lastPushRevErr.Load().(error) - if !ok { - return false - } - if lastErr == nil { - return false - } - lastErrMsg := lastErr.Error() - if !strings.Contains(lastErrMsg, "HTTP 503") { - return false - } - if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { - return false - } - return true - }, "expected HTTP 503 error") - }) + } + _ = rt.WaitForPendingChanges() + wg.Done() + }() + + // and wait for a few to be done before we proceed with updating database config underneath replication + _, err = rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) + require.NoError(t, err) + + // just change the sync function to cause the database to reload + dbConfig := *rt.ServerContext().GetDbConfig("db") + dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) + resp := rt.ReplaceDbConfig("db", dbConfig) + RequireStatus(t, resp, http.StatusCreated) + + // Did we tell the client to close the connection (via HTTP/503)? + // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
+ WaitAndAssertCondition(t, func() bool { + lastErr, ok := lastPushRevErr.Load().(error) + if !ok { + return false + } + if lastErr == nil { + return false + } + lastErrMsg := lastErr.Error() + if !strings.Contains(lastErrMsg, "HTTP 503") { + return false + } + if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { + return false + } + return true + }, "expected HTTP 503 error") + } diff --git a/rest/blip_client_test.go b/rest/blip_client_test.go index 2d0c69d97e..f3e808aae0 100644 --- a/rest/blip_client_test.go +++ b/rest/blip_client_test.go @@ -46,7 +46,6 @@ type BlipTesterClientOpts struct { type BlipTesterClient struct { BlipTesterClientOpts - id uint32 // unique ID for the client rt *RestTester pullReplication *BlipTesterReplicator // SG -> CBL replications pushReplication *BlipTesterReplicator // CBL -> SG replications @@ -70,14 +69,6 @@ type BlipTesterCollectionClient struct { lastReplicatedRevLock sync.RWMutex // lock for lastReplicatedRev map } -// BlipTestClientRunner is for running the blip tester client and its associated methods in test framework -type BlipTestClientRunner struct { - clients map[uint32]*BlipTesterClient // map of created BlipTesterClient's - t *testing.T - initialisedInsideRunnerCode bool // flag to check that the BlipTesterClient is being initialised in the correct area (inside the Run() method) - SkipVersionVectorInitialization bool // used to skip the version vector subtest -} - type BodyMessagePair struct { body []byte message *blip.Message @@ -94,14 +85,6 @@ type BlipTesterReplicator struct { replicationStats *db.BlipSyncStats // Stats of replications } -// NewBlipTesterClientRunner creates a BlipTestClientRunner type -func NewBlipTesterClientRunner(t *testing.T) *BlipTestClientRunner { - return &BlipTestClientRunner{ - t: t, - clients: make(map[uint32]*BlipTesterClient), - } -} - func (btr *BlipTesterReplicator) Close() { btr.bt.Close() btr.messagesLock.Lock() @@ -588,83 +571,33 @@ func 
getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { return collections } -// NewBlipTesterClientOptsWithRT creates a BlipTesterClient and adds it to the map of clients on the BlipTestClientRunner. Then creates replications on the client -func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { - if !btcRunner.initialisedInsideRunnerCode { - btcRunner.t.Fatalf("must initialise BlipTesterClient inside Run() method") - } +func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { if opts == nil { opts = &BlipTesterClientOpts{} } - id, err := uuid.NewRandom() - require.NoError(btcRunner.t, err) - - client = &BlipTesterClient{ + btc := BlipTesterClient{ BlipTesterClientOpts: *opts, rt: rt, - id: id.ID(), - } - btcRunner.clients[client.id] = client - err = client.createBlipTesterReplications() - require.NoError(btcRunner.t, err) - - return client -} - -func (btc *BlipTesterClient) Close() { - btc.tearDownBlipClientReplications() - for _, collectionClient := range btc.collectionClients { - collectionClient.Close() } - if btc.nonCollectionAwareClient != nil { - btc.nonCollectionAwareClient.Close() - } -} - -// Run runs two subtests of the input test code, one for rev tree replications and another for version vector enabled replication -func (btcRunner *BlipTestClientRunner) Run(test func(t *testing.T, SupportedBLIPProtocols []string)) { - btcRunner.initialisedInsideRunnerCode = true - // reset to protect against someone creating a new client after Run() is run - defer func() { btcRunner.initialisedInsideRunnerCode = false }() - btcRunner.t.Run("revTree", func(t *testing.T) { - test(t, []string{db.BlipCBMobileReplicationV3}) - }) - // if test is not wanting version vector subprotocol to be run, return before we start this subtest - if btcRunner.SkipVersionVectorInitialization { - return - } - 
btcRunner.t.Run("versionVector", func(t *testing.T) { - // bump sub protocol version here and pass into test function pending CBG-3253 - test(t, nil) - }) -} -// tearDownBlipClientReplications closes any underlying BlipTesterReplications running on the BlipTesterClient -func (btc *BlipTesterClient) tearDownBlipClientReplications() { - btc.pullReplication.Close() - btc.pushReplication.Close() -} - -// createBlipTesterReplications initiates new BlipTesterReplications on the BlipTesterClient -func (btc *BlipTesterClient) createBlipTesterReplications() error { id, err := uuid.NewRandom() if err != nil { - return err + return nil, err } - if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { - return err + if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { + return nil, err } - if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { - return err + if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { + return nil, err } - collections := getCollectionsForBLIP(btc.rt.TB, btc.rt) - if !btc.BlipTesterClientOpts.SkipCollectionsInitialization && len(collections) > 0 { + collections := getCollectionsForBLIP(tb, rt) + if !opts.SkipCollectionsInitialization && len(collections) > 0 { btc.collectionClients = make([]*BlipTesterCollectionClient, len(collections)) for i, collection := range collections { if err := btc.initCollectionReplication(collection, i); err != nil { - return err + return nil, err } } } else { @@ -672,14 +605,40 @@ func (btc *BlipTesterClient) createBlipTesterReplications() error { docs: make(map[string]map[string]*BodyMessagePair), attachments: make(map[string][]byte), 
lastReplicatedRev: make(map[string]string), - parent: btc, + parent: &btc, } + } - btc.pullReplication.bt.avoidRestTesterClose = true - btc.pushReplication.bt.avoidRestTesterClose = true + return &btc, nil +} - return nil +// NewBlipTesterClient returns a client which emulates the behaviour of a CBL client over BLIP. +func NewBlipTesterClient(tb testing.TB, rt *RestTester) (client *BlipTesterClient, err error) { + return createBlipTesterClientOpts(tb, rt, nil) +} + +func NewBlipTesterClientOptsWithRT(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { + client, err = createBlipTesterClientOpts(tb, rt, opts) + if err != nil { + return nil, err + } + + client.pullReplication.bt.avoidRestTesterClose = true + client.pushReplication.bt.avoidRestTesterClose = true + + return client, nil +} + +func (btc *BlipTesterClient) Close() { + btc.pullReplication.Close() + btc.pushReplication.Close() + for _, collectionClient := range btc.collectionClients { + collectionClient.Close() + } + if btc.nonCollectionAwareClient != nil { + btc.nonCollectionAwareClient.Close() + } } func (btc *BlipTesterClient) initCollectionReplication(collection string, collectionIdx int) error { @@ -709,25 +668,25 @@ func (btc *BlipTesterClient) waitForReplicationMessage(collection *db.DatabaseCo } // SingleCollection returns a single collection blip tester if the RestTester database is configured with only one collection. Otherwise, throw a fatal test error. 
-func (btcRunner *BlipTestClientRunner) SingleCollection(clientID uint32) *BlipTesterCollectionClient { - if btcRunner.clients[clientID].nonCollectionAwareClient != nil { - return btcRunner.clients[clientID].nonCollectionAwareClient +func (btc *BlipTesterClient) SingleCollection() *BlipTesterCollectionClient { + if btc.nonCollectionAwareClient != nil { + return btc.nonCollectionAwareClient } - require.Equal(btcRunner.clients[clientID].rt.TB, 1, len(btcRunner.clients[clientID].collectionClients)) - return btcRunner.clients[clientID].collectionClients[0] + require.Equal(btc.rt.TB, 1, len(btc.collectionClients)) + return btc.collectionClients[0] } // Collection return a collection blip tester by name, if configured in the RestTester database. Otherwise, throw a fatal test error. -func (btcRunner *BlipTestClientRunner) Collection(clientID uint32, collectionName string) *BlipTesterCollectionClient { - if collectionName == "_default._default" && btcRunner.clients[clientID].nonCollectionAwareClient != nil { - return btcRunner.clients[clientID].nonCollectionAwareClient +func (btc *BlipTesterClient) Collection(collectionName string) *BlipTesterCollectionClient { + if collectionName == "_default._default" && btc.nonCollectionAwareClient != nil { + return btc.nonCollectionAwareClient } - for _, collectionClient := range btcRunner.clients[clientID].collectionClients { + for _, collectionClient := range btc.collectionClients { if collectionClient.collection == collectionName { return collectionClient } } - btcRunner.clients[clientID].rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) + btc.rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) return nil } @@ -1167,81 +1126,81 @@ func (btc *BlipTesterCollectionClient) GetBlipRevMessage(docID, revID string) (m return nil, false } -func (btcRunner *BlipTestClientRunner) StartPull(clientID uint32) error { - return btcRunner.SingleCollection(clientID).StartPull() +func (btc 
*BlipTesterClient) StartPull() error { + return btc.SingleCollection().StartPull() } // WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. -func (btcRunner *BlipTestClientRunner) WaitForVersion(clientID uint32, docID string, docVersion DocVersion) (data []byte, found bool) { - return btcRunner.SingleCollection(clientID).WaitForVersion(docID, docVersion) +func (btc *BlipTesterClient) WaitForVersion(docID string, docVersion DocVersion) (data []byte, found bool) { + return btc.SingleCollection().WaitForVersion(docID, docVersion) } -func (btcRunner *BlipTestClientRunner) WaitForDoc(clientID uint32, docID string) ([]byte, bool) { - return btcRunner.SingleCollection(clientID).WaitForDoc(docID) +func (btc *BlipTesterClient) WaitForDoc(docID string) ([]byte, bool) { + return btc.SingleCollection().WaitForDoc(docID) } -func (btcRunner *BlipTestClientRunner) WaitForBlipRevMessage(clientID uint32, docID string, docVersion DocVersion) (*blip.Message, bool) { - return btcRunner.SingleCollection(clientID).WaitForBlipRevMessage(docID, docVersion) +func (btc *BlipTesterClient) WaitForBlipRevMessage(docID string, docVersion DocVersion) (*blip.Message, bool) { + return btc.SingleCollection().WaitForBlipRevMessage(docID, docVersion) } -func (btcRunner *BlipTestClientRunner) StartOneshotPull(clientID uint32) error { - return btcRunner.SingleCollection(clientID).StartOneshotPull() +func (btc *BlipTesterClient) StartOneshotPull() error { + return btc.SingleCollection().StartOneshotPull() } -func (btcRunner *BlipTestClientRunner) StartOneshotPullFiltered(clientID uint32, channels string) error { - return btcRunner.SingleCollection(clientID).StartOneshotPullFiltered(channels) +func (btc *BlipTesterClient) StartOneshotPullFiltered(channels string) error { + return btc.SingleCollection().StartOneshotPullFiltered(channels) } -func (btcRunner *BlipTestClientRunner) StartOneshotPullRequestPlus(clientID uint32) error { - return 
btcRunner.SingleCollection(clientID).StartOneshotPullRequestPlus() +func (btc *BlipTesterClient) StartOneshotPullRequestPlus() error { + return btc.SingleCollection().StartOneshotPullRequestPlus() } -func (btcRunner *BlipTestClientRunner) PushRev(clientID uint32, docID string, version DocVersion, body []byte) (DocVersion, error) { - return btcRunner.SingleCollection(clientID).PushRev(docID, version, body) +func (btc *BlipTesterClient) PushRev(docID string, version DocVersion, body []byte) (DocVersion, error) { + return btc.SingleCollection().PushRev(docID, version, body) } -func (btcRunner *BlipTestClientRunner) StartPullSince(clientID uint32, continuous, since, activeOnly string) error { - return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, "", "") +func (btc *BlipTesterClient) StartPullSince(continuous, since, activeOnly string) error { + return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, "", "") } -func (btcRunner *BlipTestClientRunner) StartFilteredPullSince(clientID uint32, continuous, since, activeOnly, channels string) error { - return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, channels, "") +func (btc *BlipTesterClient) StartFilteredPullSince(continuous, since, activeOnly string, channels string) error { + return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, channels, "") } -func (btcRunner *BlipTestClientRunner) GetVersion(clientID uint32, docID string, docVersion DocVersion) ([]byte, bool) { - return btcRunner.SingleCollection(clientID).GetVersion(docID, docVersion) +func (btc *BlipTesterClient) GetVersion(docID string, docVersion DocVersion) ([]byte, bool) { + return btc.SingleCollection().GetVersion(docID, docVersion) } -func (btcRunner *BlipTestClientRunner) saveAttachment(clientID uint32, contentType string, attachmentData string) (int, string, error) { - return btcRunner.SingleCollection(clientID).saveAttachment(contentType, 
attachmentData) +func (btc *BlipTesterClient) saveAttachment(contentType string, attachmentData string) (int, string, error) { + return btc.SingleCollection().saveAttachment(contentType, attachmentData) } -func (btcRunner *BlipTestClientRunner) StoreRevOnClient(clientID uint32, docID, revID string, body []byte) error { - return btcRunner.SingleCollection(clientID).StoreRevOnClient(docID, revID, body) +func (btc *BlipTesterClient) StoreRevOnClient(docID, revID string, body []byte) error { + return btc.SingleCollection().StoreRevOnClient(docID, revID, body) } -func (btcRunner *BlipTestClientRunner) PushRevWithHistory(clientID uint32, docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { - return btcRunner.SingleCollection(clientID).PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) +func (btc *BlipTesterClient) PushRevWithHistory(docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { + return btc.SingleCollection().PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) } -func (btcRunner *BlipTestClientRunner) AttachmentsLock(clientID uint32) *sync.RWMutex { - return &btcRunner.SingleCollection(clientID).attachmentsLock +func (btc *BlipTesterClient) AttachmentsLock() *sync.RWMutex { + return &btc.SingleCollection().attachmentsLock } func (btc *BlipTesterCollectionClient) AttachmentsLock() *sync.RWMutex { return &btc.attachmentsLock } -func (btcRunner *BlipTestClientRunner) Attachments(clientID uint32) map[string][]byte { - return btcRunner.SingleCollection(clientID).attachments +func (btc *BlipTesterClient) Attachments() map[string][]byte { + return btc.SingleCollection().attachments } func (btc *BlipTesterCollectionClient) Attachments() map[string][]byte { return btc.attachments } -func (btcRunner *BlipTestClientRunner) UnsubPullChanges(clientID uint32) ([]byte, error) { - return btcRunner.SingleCollection(clientID).UnsubPullChanges() +func (btc *BlipTesterClient) UnsubPullChanges() 
([]byte, error) { + return btc.SingleCollection().UnsubPullChanges() } func (btc *BlipTesterCollectionClient) addCollectionProperty(msg *blip.Message) { diff --git a/rest/bulk_api.go b/rest/bulk_api.go index 15f6b731a6..83358e7c49 100644 --- a/rest/bulk_api.go +++ b/rest/bulk_api.go @@ -511,7 +511,7 @@ func (h *handler) handleBulkDocs() error { err = base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions") } else { revid = revisions[0] - _, _, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, doc, revisions, false, db.ExistingVersionWithUpdateToHLV) + _, _, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, doc, revisions, false) } } diff --git a/rest/doc_api.go b/rest/doc_api.go index d7ca12924e..4c278e8f0c 100644 --- a/rest/doc_api.go +++ b/rest/doc_api.go @@ -471,7 +471,7 @@ func (h *handler) handlePutDoc() error { if revisions == nil { return base.HTTPErrorf(http.StatusBadRequest, "Bad _revisions") } - doc, newRev, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, body, revisions, false, db.ExistingVersionWithUpdateToHLV) + doc, newRev, err = h.collection.PutExistingRevWithBody(h.ctx(), docid, body, revisions, false) if err != nil { return err } @@ -548,7 +548,7 @@ func (h *handler) handlePutDocReplicator2(docid string, roundTrip bool) (err err newDoc.UpdateBody(body) } - doc, rev, err := h.collection.PutExistingRev(h.ctx(), newDoc, history, true, false, nil, db.ExistingVersionWithUpdateToHLV) + doc, rev, err := h.collection.PutExistingRev(h.ctx(), newDoc, history, true, false, nil) if err != nil { return err diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go index 6426dfe99d..0fa9f61b82 100644 --- a/rest/importtest/import_test.go +++ b/rest/importtest/import_test.go @@ -424,9 +424,6 @@ func TestXattrDoubleDelete(t *testing.T) { } func TestViewQueryTombstoneRetrieval(t *testing.T) { - t.Skip("Disabled pending CBG-3503") - base.SkipImportTestsIfNotEnabled(t) - if !base.TestsDisableGSI() { t.Skip("views tests are 
not applicable under GSI") } diff --git a/rest/replicatortest/replicator_test.go b/rest/replicatortest/replicator_test.go index 7dc1fa4118..8c67c764c3 100644 --- a/rest/replicatortest/replicator_test.go +++ b/rest/replicatortest/replicator_test.go @@ -8320,46 +8320,3 @@ func requireBodyEqual(t *testing.T, expected string, doc *db.Document) { require.NoError(t, base.JSONUnmarshal([]byte(expected), &expectedBody)) require.Equal(t, expectedBody, doc.Body(base.TestCtx(t))) } - -// TestReplicatorUpdateHLVOnPut: -// - For purpose of testing the PutExistingRev code path -// - Put a doc on a active rest tester -// - Create replication and wait for the doc to be replicated to passive node -// - Assert on the HLV in the metadata of the replicated document -func TestReplicatorUpdateHLVOnPut(t *testing.T) { - - activeRT, passiveRT, remoteURL, teardown := rest.SetupSGRPeers(t) - defer teardown() - - // Grab the bucket UUIDs for both rest testers - activeBucketUUID, err := activeRT.GetDatabase().Bucket.UUID() - require.NoError(t, err) - - const rep = "replication" - - // Put a doc and assert on the HLV update in the sync data - resp := activeRT.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/doc1", `{"source": "activeRT"}`) - rest.RequireStatus(t, resp, http.StatusCreated) - - syncData, err := activeRT.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS := base.HexCasToUint64(syncData.Cas) - - assert.Equal(t, activeBucketUUID, syncData.HLV.SourceID) - assert.Equal(t, uintCAS, syncData.HLV.Version) - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) - - // create the replication to push the doc to the passive node and wait for the doc to be replicated - activeRT.CreateReplication(rep, remoteURL, db.ActiveReplicatorTypePush, nil, false, db.ConflictResolverDefault) - - _, err = passiveRT.WaitForChanges(1, "/{{.keyspace}}/_changes", "", true) - require.NoError(t, err) - - // assert on the HLV update on the passive node 
- syncData, err = passiveRT.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), "doc1") - assert.NoError(t, err) - uintCAS = base.HexCasToUint64(syncData.Cas) - - // TODO: assert that the SourceID and Verison pair are preserved correctly pending CBG-3211 - assert.Equal(t, uintCAS, syncData.HLV.CurrentVersionCAS) -} diff --git a/rest/revocation_test.go b/rest/revocation_test.go index cd58cea1d5..35359e5f1d 100644 --- a/rest/revocation_test.go +++ b/rest/revocation_test.go @@ -2223,195 +2223,190 @@ func TestReplicatorRevocationsFromZero(t *testing.T) { func TestRevocationMessage(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - btcRunner := NewBlipTesterClientRunner(t) - const doc1ID = "doc1" + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer btc.Close() + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + }) + assert.NoError(t, err) + defer btc.Close() + + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc("doc", `{"channels": "A"}`) - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := btc.rt.PutDoc("doc", `{"channels": "A"}`) + require.NoError(t, rt.WaitForPendingChanges()) - 
require.NoError(t, btc.rt.WaitForPendingChanges()) + // Start pull + err = btc.StartOneshotPull() + assert.NoError(t, err) - // Start pull - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) + // Wait for doc revision to come over + _, ok := btc.WaitForBlipRevMessage("doc", version) + require.True(t, ok) - // Wait for doc revision to come over - _, ok := btcRunner.WaitForBlipRevMessage(btc.id, "doc", version) - require.True(t, ok) + // Remove role from user + revocationTester.removeRole("user", "foo") - // Remove role from user - revocationTester.removeRole("user", "foo") + const doc1ID = "doc1" + version = rt.PutDoc(doc1ID, `{"channels": "!"}`) - version = btc.rt.PutDoc(doc1ID, `{"channels": "!"}`) + revocationTester.fillToSeq(10) + version = rt.UpdateDoc(doc1ID, version, "{}") - revocationTester.fillToSeq(10) - version = btc.rt.UpdateDoc(doc1ID, version, "{}") + require.NoError(t, rt.WaitForPendingChanges()) - require.NoError(t, btc.rt.WaitForPendingChanges()) + // Start a pull since 5 to receive revocation and removal + err = btc.StartPullSince("false", "5", "false") + assert.NoError(t, err) - // Start a pull since 5 to receive revocation and removal - err = btcRunner.StartPullSince(btc.id, "false", "5", "false") - assert.NoError(t, err) + // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here + _, found := btc.WaitForVersion(doc1ID, version) + require.True(t, found) + + messages := btc.pullReplication.GetMessages() + + testCases := []struct { + Name string + DocID string + ExpectedDeleted int64 + }{ + { + Name: "Revocation", + DocID: "doc", + ExpectedDeleted: int64(2), + }, + { + Name: "Removed", + DocID: "doc1", + ExpectedDeleted: int64(4), + }, + } - // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here - _, found := btcRunner.WaitForVersion(btc.id, doc1ID, version) - require.True(t, found) - - messages := btc.pullReplication.GetMessages() - - 
testCases := []struct { - Name string - DocID string - ExpectedDeleted int64 - }{ - { - Name: "Revocation", - DocID: "doc", - ExpectedDeleted: int64(2), - }, - { - Name: "Removed", - DocID: "doc1", - ExpectedDeleted: int64(4), - }, - } + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + // Verify the deleted property in the changes message is "2" this indicated a revocation + for _, msg := range messages { + if msg.Properties[db.BlipProfile] == db.MessageChanges { + var changesMessages [][]interface{} + err = msg.ReadJSONBody(&changesMessages) + if err != nil { + continue + } - for _, testCase := range testCases { - t.Run(testCase.Name, func(t *testing.T) { - // Verify the deleted property in the changes message is "2" this indicated a revocation - for _, msg := range messages { - if msg.Properties[db.BlipProfile] == db.MessageChanges { - var changesMessages [][]interface{} - err = msg.ReadJSONBody(&changesMessages) - if err != nil { + if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { + continue + } + + criteriaMet := false + for _, changesMessage := range changesMessages { + castedNum, ok := changesMessage[3].(json.Number) + if !ok { continue } - - if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { + intDeleted, err := castedNum.Int64() + if err != nil { continue } - - criteriaMet := false - for _, changesMessage := range changesMessages { - castedNum, ok := changesMessage[3].(json.Number) - if !ok { - continue - } - intDeleted, err := castedNum.Int64() - if err != nil { - continue - } - if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { - criteriaMet = true - break - } + if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { + criteriaMet = true + break } - - assert.True(t, criteriaMet) } + + assert.True(t, criteriaMet) } - }) - } - assert.NoError(t, err) - }) + } + }) + 
} + + assert.NoError(t, err) } func TestRevocationNoRev(t *testing.T) { defer db.SuspendSequenceBatching()() - const docID = "doc" - const waitMarkerID = "docmarker" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer btc.Close() + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + }) + assert.NoError(t, err) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := btc.rt.PutDoc(docID, `{"channels": "A"}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, btc.rt.WaitForPendingChanges()) - firstOneShotSinceSeq := btc.rt.GetDocumentSequence("doc") + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + const docID = "doc" + version := rt.PutDoc(docID, `{"channels": "A"}`) - // OneShot pull to grab doc - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - _, ok := btcRunner.WaitForVersion(btc.id, docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err = btc.StartOneshotPull() + assert.NoError(t, err) - // Remove role from user - revocationTester.removeRole("user", "foo") + _, ok 
:= btc.WaitForVersion(docID, version) + require.True(t, ok) - _ = btc.rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + // Remove role from user + revocationTester.removeRole("user", "foo") - waitMarkerVersion := btc.rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, btc.rt.WaitForPendingChanges()) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") - assert.NoError(t, err) + const waitMarkerID = "docmarker" + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) - require.True(t, ok) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btc.StartPullSince("false", lastSeqStr, "false") + assert.NoError(t, err) - messages := btc.pullReplication.GetMessages() + _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) + require.True(t, ok) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message - } + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message } } + } - var 
messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - require.NoError(t, err) - require.Len(t, messageBody, 2) - require.Len(t, messageBody[0], 4) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + require.NoError(t, err) + require.Len(t, messageBody, 2) + require.Len(t, messageBody[0], 4) - deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - require.NoError(t, err) + deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + require.NoError(t, err) - assert.Equal(t, deletedFlag, int64(2)) - }) + assert.Equal(t, deletedFlag, int64(2)) } func TestRevocationGetSyncDataError(t *testing.T) { @@ -2419,111 +2414,106 @@ func TestRevocationGetSyncDataError(t *testing.T) { var throw bool base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) // Two callbacks to cover usage with CBS/Xattrs and without - rtConfig := &RestTesterConfig{ - leakyBucketConfig: &base.LeakyBucketConfig{ - GetWithXattrCallback: func(key string) error { - return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") - }, GetRawCallback: func(key string) error { - if throw { - return fmt.Errorf("Leaky Bucket GetRawCallback Error") - } - return nil + revocationTester, rt := InitScenario( + t, &RestTesterConfig{ + leakyBucketConfig: &base.LeakyBucketConfig{ + GetWithXattrCallback: func(key string) error { + return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") + }, GetRawCallback: func(key string) error { + if throw { + return fmt.Errorf("Leaky Bucket GetRawCallback Error") + } + return nil + }, }, }, - } + ) - const docID = "doc" - const waitMarkerID = "docmarker" - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - revocationTester, rt := InitScenario(t, rtConfig) - defer rt.Close() - btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - 
SendRevocations: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, - }) - defer btc.Close() + defer rt.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + }) + assert.NoError(t, err) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := btc.rt.PutDoc(docID, `{"channels": "A"}}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, btc.rt.WaitForPendingChanges()) - firstOneShotSinceSeq := btc.rt.GetDocumentSequence("doc") + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + const docID = "doc" + version := rt.PutDoc(docID, `{"channels": "A"}}`) - // OneShot pull to grab doc - err := btcRunner.StartOneshotPull(btc.id) - assert.NoError(t, err) - throw = true - _, ok := btcRunner.WaitForVersion(btc.id, docID, version) - require.True(t, ok) + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - // Remove role from user - revocationTester.removeRole("user", "foo") + // OneShot pull to grab doc + err = btc.StartOneshotPull() + assert.NoError(t, err) + throw = true + _, ok := btc.WaitForVersion(docID, version) + require.True(t, ok) - _ = btc.rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + // Remove role from user + revocationTester.removeRole("user", "foo") - waitMarkerVersion := btc.rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, btc.rt.WaitForPendingChanges()) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btcRunner.StartPullSince(btc.id, 
"false", lastSeqStr, "false") - assert.NoError(t, err) + const waitMarkerID = "docmarker" + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) - require.True(t, ok) - }) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btc.StartPullSince("false", lastSeqStr, "false") + assert.NoError(t, err) + + _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) + require.True(t, ok) } // Regression test for CBG-2183. func TestBlipRevokeNonExistentRole(t *testing.T) { - base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - rtConfig := &RestTesterConfig{ - GuestEnabled: false, - } - btcRunner := NewBlipTesterClientRunner(t) - - btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { - rt := NewRestTester(t, rtConfig) - defer rt.Close() - bt := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ - Username: "bilbo", - SendRevocations: true, - SupportedBLIPProtocols: SupportedBLIPProtocols, + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: false, }) - defer bt.Close() - - collection := bt.rt.GetSingleTestDatabaseCollection() + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) - res := bt.rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", bt.rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) - RequireStatus(t, res, http.StatusOK) + base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - // Create a doc so we have something to replicate - res = bt.rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) - RequireStatus(t, res, http.StatusCreated) + // 1. 
Create user with admin_roles including two roles not previously defined (a1 and a2, for example) + res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) + RequireStatus(t, res, http.StatusCreated) - // 3. Update the user to not reference one of the roles (update to ['a1'], for example) - // [also revoke channel c1 so the doc shows up in the revocation queries] - res = bt.rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", bt.rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) - RequireStatus(t, res, http.StatusOK) + // Create a doc so we have something to replicate + res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) + RequireStatus(t, res, http.StatusCreated) - // 4. Try to sync - require.NoError(t, btcRunner.StartPull(bt.id)) + // 3. Update the user to not reference one of the roles (update to ['a1'], for example) + // [also revoke channel c1 so the doc shows up in the revocation queries] + res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) + RequireStatus(t, res, http.StatusOK) - // in the failing case we'll panic before hitting this - base.RequireWaitForStat(t, func() int64 { - return bt.rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() - }, 1) + // 4. 
Try to sync + bt, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ + Username: "bilbo", + SendRevocations: true, }) + require.NoError(t, err) + defer bt.Close() + + require.NoError(t, bt.StartPull()) + + // in the failing case we'll panic before hitting this + base.RequireWaitForStat(t, func() int64 { + return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() + }, 1) } func TestReplicatorSwitchPurgeNoReset(t *testing.T) { diff --git a/rest/user_api_test.go b/rest/user_api_test.go index fd5d979c21..fc97dbbcdf 100644 --- a/rest/user_api_test.go +++ b/rest/user_api_test.go @@ -1479,7 +1479,7 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) { _, err := dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData) assert.NoError(t, err) - docRev, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) + docRev, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) assert.NoError(t, err) assert.Equal(t, 0, len(docRev.Channels.ToArray())) assert.Equal(t, syncData.CurrentRev, docRev.RevID) @@ -1499,7 +1499,7 @@ func TestUserXattrAvoidRevisionIDGeneration(t *testing.T) { _, err = dataStore.GetXattr(rt.Context(), docKey, base.SyncXattrName, &syncData2) assert.NoError(t, err) - docRev2, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().GetWithRev(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) + docRev2, err := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), docKey, syncData.CurrentRev, true, false) assert.NoError(t, err) assert.Equal(t, syncData2.CurrentRev, docRev2.RevID) From 1db17638fd47b702da9837475c7cb5e5e0757875 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Tue, 14 Nov 2023 11:45:37 -0500 Subject: [PATCH 09/14] put lint timeout in config (#6576) This means it is automatically 
picked up by editors in addition to CI. Usually this is fast, but sometimes staticcheck can slow down, especially on mac. Once it is cached, it is quick. Updated golangci-lint version in github actions. --- .github/workflows/ci.yml | 4 ++-- .golangci-strict.yml | 2 ++ .golangci.yml | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b7e6c1e5ae..827a3b51b3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,8 +54,8 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@v3 with: - version: v1.55.0 - args: --config=.golangci-strict.yml --timeout=3m + version: v1.55.2 + args: --config=.golangci-strict.yml test: runs-on: ${{ matrix.os }} diff --git a/.golangci-strict.yml b/.golangci-strict.yml index c01f53b37c..18e485e41a 100644 --- a/.golangci-strict.yml +++ b/.golangci-strict.yml @@ -8,6 +8,8 @@ # config file for golangci-lint +timeout: 3m + linters: enable: #- bodyclose # checks whether HTTP response body is closed successfully diff --git a/.golangci.yml b/.golangci.yml index b0cf702de9..2ae7f5c367 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,6 +8,8 @@ # config file for golangci-lint +timeout: 3m + linters: enable: - bodyclose # checks whether HTTP response body is closed successfully From 488fae46f50c95802db257db6bff90def82438af Mon Sep 17 00:00:00 2001 From: Gregory Newman-Smith <109068393+gregns1@users.noreply.github.com> Date: Tue, 14 Nov 2023 16:54:15 +0000 Subject: [PATCH 10/14] CBG-3576: changes to BlipTesterClient to to run in VV and non VV protocol versions (#6574) --- rest/attachment_test.go | 366 ++++---- rest/blip_api_attachment_test.go | 895 ++++++++++--------- rest/blip_api_collections_test.go | 552 ++++++------ rest/blip_api_crud_test.go | 939 ++++++++++---------- rest/blip_api_delta_sync_test.go | 1352 +++++++++++++++-------------- rest/blip_api_no_race_test.go | 123 +-- rest/blip_client_test.go | 206 +++-- rest/revocation_test.go | 
456 +++++----- 8 files changed, 2559 insertions(+), 2330 deletions(-) diff --git a/rest/attachment_test.go b/rest/attachment_test.go index 0b480d4127..f44408068d 100644 --- a/rest/attachment_test.go +++ b/rest/attachment_test.go @@ -2260,184 +2260,205 @@ func TestAttachmentDeleteOnExpiry(t *testing.T) { } func TestUpdateExistingAttachment(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + doc1Version := rt.PutDoc(doc1ID, `{}`) + doc2Version := rt.PutDoc(doc2ID, `{}`) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + require.NoError(t, rt.WaitForPendingChanges()) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - assert.NoError(t, 
rt.WaitForVersion(doc2ID, doc2Version)) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) - doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - assert.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) + + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", 
"_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":3}}}`)) + require.NoError(t, err) - assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, "attachmentB", string(req.BodyBytes())) + doc1, err := rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + assert.NoError(t, err) + + assert.Equal(t, "sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=", doc1.Attachments["attachment"].(map[string]interface{})["digest"]) + + req := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, "attachmentB", string(req.BodyBytes())) + }) } // TestPushUnknownAttachmentAsStub sets revpos to an older generation, for an attachment that doesn't exist on the server. // Verifies that getAttachment is triggered, and attachment is properly persisted. 
func TestPushUnknownAttachmentAsStub(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() - + } const doc1ID = "doc1" - doc1Version := rt.PutDoc(doc1ID, `{}`) + btcRunner := NewBlipTesterClientRunner(t) - require.NoError(t, rt.WaitForPendingChanges()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Add doc1 and doc2 + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) + require.NoError(t, btc.rt.WaitForPendingChanges()) - // force attachment into test client's store to validate it's fetched - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - contentType := "text/plain" + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - length, digest, err := btc.saveAttachment(contentType, attachmentAData) - require.NoError(t, err) - // Update doc1, include reference to non-existing attachment with recent revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) - require.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) - require.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + // force attachment into test client's store to validate it's fetched + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + contentType := "text/plain" - // verify that 
attachment exists on document and was persisted - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + length, digest, err := btcRunner.saveAttachment(btc.id, contentType, attachmentAData) + require.NoError(t, err) + // Update doc1, include reference to non-existing attachment with recent revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(fmt.Sprintf(`{"key": "val", "_attachments":{"attachment":{"digest":"%s","length":%d,"content_type":"%s","stub":true,"revpos":1}}}`, digest, length, contentType))) + require.NoError(t, err) + + require.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + // verify that attachment exists on document and was persisted + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + }) } func TestMinRevPosWorkToAvoidUnnecessaryProveAttachment(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ AllowConflicts: base.BoolPtr(true), }, }, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } - // Push an initial rev with attachment data + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc" - initialVersion := rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) - // Replicate data to client and ensure doc arrives - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, initialVersion) - assert.True(t, found) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols 
[]string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Push a revision with a bunch of history simulating doc updated on mobile device - // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment - proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() - revid, err := btc.PushRevWithHistory(docID, initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Push an initial rev with attachment data + initialVersion := btc.rt.PutDoc(docID, `{"_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Push another bunch of history - _, err = btc.PushRevWithHistory(docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) - assert.NoError(t, err) - proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() - assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + // Replicate data to client and ensure doc arrives + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, initialVersion) + assert.True(t, found) + + // Push a revision with a bunch of history simulating doc updated on mobile device + // Note this references revpos 1 and therefore SGW has it - Shouldn't need proveAttachment + proveAttachmentBefore := btc.pushReplication.replicationStats.ProveAttachment.Value() + revid, err := btcRunner.PushRevWithHistory(btc.id, docID, 
initialVersion.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter := btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + + // Push another bunch of history + _, err = btcRunner.PushRevWithHistory(btc.id, docID, revid, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}`), 25, 5) + assert.NoError(t, err) + proveAttachmentAfter = btc.pushReplication.replicationStats.ProveAttachment.Value() + assert.Equal(t, proveAttachmentBefore, proveAttachmentAfter) + }) } + func TestAttachmentWithErroneousRevPos(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) - // Create rev 1 with the hello.txt attachment - const docID = "doc" - version := rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) - err = rt.WaitForPendingChanges() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Pull rev and attachment down to client - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, found := btc.WaitForVersion(docID, version) - assert.True(t, found) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() + // Create rev 1 with the hello.txt attachment + const docID = "doc" + version := btc.rt.PutDoc(docID, `{"val": "val", "_attachments": {"hello.txt": {"data": "aGVsbG8gd29ybGQ="}}}`) + err := btc.rt.WaitForPendingChanges() + assert.NoError(t, err) - // Add 
an attachment to client - btc.AttachmentsLock().Lock() - btc.Attachments()["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") - btc.AttachmentsLock().Unlock() + // Pull rev and attachment down to client + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, found := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, found) - // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment - _, err = btc.PushRevWithHistory(docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) - require.NoError(t, err) + // Add an attachment to client + btcRunner.AttachmentsLock(btc.id).Lock() + btcRunner.Attachments(btc.id)["sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="] = []byte("goodbye cruel world") + btcRunner.AttachmentsLock(btc.id).Unlock() - // Ensure message and attachment is pushed up - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + // Put doc with an erroneous revpos 1 but with a different digest, referring to the above attachment + _, err = btcRunner.PushRevWithHistory(btc.id, docID, version.RevID, []byte(`{"_attachments": {"hello.txt": {"revpos":1,"stub":true,"length": 19,"digest":"sha1-l+N7VpXGnoxMm8xfvtWPbz2YvDc="}}}`), 1, 0) + require.NoError(t, err) - // Get the attachment and ensure the data is updated - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) + // Ensure message and attachment is pushed up + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + // Get the attachment and ensure the data is updated + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/doc/hello.txt", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, "goodbye cruel world", string(resp.BodyBytes())) + }) } // CBG-2004: 
Test that prove attachment over Blip works correctly when receiving a ErrAttachmentNotFound @@ -2578,74 +2599,79 @@ func TestPutInvalidAttachment(t *testing.T) { // validates that proveAttachment isn't being invoked when the attachment is already present and the // digest doesn't change, regardless of revpos. func TestCBLRevposHandling(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - assert.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1" doc2ID = "doc2" ) - doc1Version := rt.PutDoc(doc1ID, `{}`) - doc2Version := rt.PutDoc(doc2ID, `{}`) - require.NoError(t, rt.WaitForPendingChanges()) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - _, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) - attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) + opts := BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &opts) + defer btc.Close() - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) - require.NoError(t, err) - doc2Version, err = btc.PushRev(doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) - require.NoError(t, err) + doc1Version := btc.rt.PutDoc(doc1ID, `{}`) + doc2Version := btc.rt.PutDoc(doc2ID, `{}`) + require.NoError(t, btc.rt.WaitForPendingChanges()) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) - 
assert.NoError(t, rt.WaitForVersion(doc2ID, doc2Version)) + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) - require.NoError(t, err) - _, err = rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) - require.NoError(t, err) + attachmentAData := base64.StdEncoding.EncodeToString([]byte("attachmentA")) + attachmentBData := base64.StdEncoding.EncodeToString([]byte("attachmentB")) - // Update doc1, don't change attachment, use correct revpos - doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) - require.NoError(t, err) + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentAData+`"}}}`)) + require.NoError(t, err) + doc2Version, err = btcRunner.PushRev(btc.id, doc2ID, doc2Version, []byte(`{"key": "val", "_attachments": {"attachment": {"data": "`+attachmentBData+`"}}}`)) + require.NoError(t, err) - assert.NoError(t, rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) + assert.NoError(t, btc.rt.WaitForVersion(doc2ID, doc2Version)) - // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. 
- doc1Version, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) - require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc1", db.DocUnmarshalAll) + require.NoError(t, err) + _, err = btc.rt.GetSingleTestDatabaseCollection().GetDocument(base.TestCtx(t), "doc2", db.DocUnmarshalAll) + require.NoError(t, err) - // Validate attachment exists - attResponse := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use correct revpos + doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":2}}}`)) + require.NoError(t, err) - attachmentPushCount := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment - _, err = btc.PushRev(doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) - require.NoError(t, err) + assert.NoError(t, btc.rt.WaitForVersion(doc1ID, doc1Version)) - // Validate attachment exists and is updated - attResponse = rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") - assert.Equal(t, 200, attResponse.Code) - assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + // Update doc1, don't change attachment, use revpos=generation of revid, as CBL 2.x does. Should not proveAttachment on digest match. 
+ doc1Version, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-wzp8ZyykdEuZ9GuqmxQ7XDrY7Co=","length":11,"content_type":"","stub":true,"revpos":4}}}`)) + require.NoError(t, err) - attachmentPushCountAfter := rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() - assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + // Validate attachment exists + attResponse := btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentA", string(attResponse.BodyBytes())) + attachmentPushCount := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + // Update doc1, change attachment digest with CBL revpos=generation. Should getAttachment + _, err = btcRunner.PushRev(btc.id, doc1ID, doc1Version, []byte(`{"key": "val", "_attachments":{"attachment":{"digest":"sha1-SKk0IV40XSHW37d3H0xpv2+z9Ck=","length":11,"content_type":"","stub":true,"revpos":5}}}`)) + require.NoError(t, err) + + // Validate attachment exists and is updated + attResponse = btc.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc1/attachment", "") + assert.Equal(t, 200, attResponse.Code) + assert.Equal(t, "attachmentB", string(attResponse.BodyBytes())) + + attachmentPushCountAfter := btc.rt.GetDatabase().DbStats.CBLReplicationPushStats.AttachmentPushCount.Value() + assert.Equal(t, attachmentPushCount+1, attachmentPushCountAfter) + }) } // Helper_Functions diff --git a/rest/blip_api_attachment_test.go b/rest/blip_api_attachment_test.go index 266e580678..15a16c2c62 100644 --- a/rest/blip_api_attachment_test.go +++ b/rest/blip_api_attachment_test.go @@ -43,56 +43,63 @@ func TestBlipPushPullV2AttachmentV2Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - 
btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + // given this test is for v2 protocol, skip version vector test + btcRunner.SkipVersionVectorInitialization = true const docID = "doc1" - // Create doc revision with attachment on SG. - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + opts := &BlipTesterClientOpts{} + opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - // Update the replicated doc at client along with keeping the same attachment stub. - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - respBody := rt.GetDocVersion(docID, version) + // Create doc revision with attachment on SG. 
+ bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) + // Update the replicated doc at client along with keeping the same attachment stub. 
+ bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } // Test pushing and pulling v2 attachments with v3 client @@ -113,54 +120,59 @@ func TestBlipPushPullV2AttachmentV3Client(t *testing.T) { }, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // Create doc revision with attachment on SG. 
- bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client along with keeping the same attachment stub. - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(11), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := 
btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client along with keeping the same attachment stub. + bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := btc.rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(11), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + 
}) } // TestBlipProveAttachmentV2 ensures that CBL's proveAttachment for deduplication is working correctly even for v2 attachments which aren't de-duped on the server side. @@ -169,56 +181,59 @@ func TestBlipProveAttachmentV2(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - - err = btc.StartPull() - assert.NoError(t, err) const ( doc1ID = "doc1" doc2ID = "doc2" ) - const ( attachmentName = "hello.txt" attachmentData = "hello world" ) - var ( attachmentDataB64 = base64.StdEncoding.EncodeToString([]byte(attachmentData)) attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on SG - v2 attachments intentionally result in two copies, - // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd. 
- doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version := rt.PutDoc(doc1ID, doc1Body) - - data, ok := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, ok) - bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) - - // create doc2 now that we know the client has the attachment - doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version := rt.PutDoc(doc2ID, doc2Body) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - data, ok = btc.WaitForVersion(doc2ID, doc2Version) - require.True(t, ok) - bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) - require.JSONEq(t, bodyTextExpected, string(data)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) - assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) - assert.Equal(t, int64(1), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) - assert.Equal(t, int64(len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // Create two docs with the same 
attachment data on SG - v2 attachments intentionally result in two copies, + // CBL will still de-dupe attachments based on digest, so will still try proveAttachmnet for the 2nd. + doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version := btc.rt.PutDoc(doc1ID, doc1Body) + + data, ok := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, ok) + bodyTextExpected := fmt.Sprintf(`{"greetings":[{"hi":"alice"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + // create doc2 now that we know the client has the attachment + doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version := btc.rt.PutDoc(doc2ID, doc2Body) + + data, ok = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + require.True(t, ok) + bodyTextExpected = fmt.Sprintf(`{"greetings":[{"howdy":"bob"}],"_attachments":{"%s":{"revpos":1,"length":%d,"stub":true,"digest":"%s"}}}`, attachmentName, len(attachmentData), attachmentDigest) + require.JSONEq(t, bodyTextExpected, string(data)) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevSendCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPull().RevErrorCount.Value()) + assert.Equal(t, int64(1), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullCount.Value()) + assert.Equal(t, int64(len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPull().AttachmentPullBytes.Value()) + }) } // TestBlipProveAttachmentV2Push ensures that CBL's attachment deduplication is ignored for push replications - resulting in new server-side digests and duplicated attachment data (v2 attachment format). 
@@ -227,50 +242,51 @@ func TestBlipProveAttachmentV2Push(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer btc.Close() - const ( doc1ID = "doc1" doc2ID = "doc2" ) - const ( attachmentName = "hello.txt" attachmentData = "hello world" ) - var ( attachmentDataB64 = base64.StdEncoding.EncodeToString([]byte(attachmentData)) // attachmentDigest = "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" ) - // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. - doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc1Version, err := btc.PushRev(doc1ID, EmptyDocVersion(), []byte(doc1Body)) - require.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test - err = rt.WaitForVersion(doc1ID, doc1Version) - require.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
- doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) - doc2Version, err := btc.PushRev(doc2ID, EmptyDocVersion(), []byte(doc2Body)) - require.NoError(t, err) - - err = rt.WaitForVersion(doc2ID, doc2Version) - require.NoError(t, err) - - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) - assert.Equal(t, int64(0), rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) - assert.Equal(t, int64(2), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) - assert.Equal(t, int64(2*len(attachmentData)), rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer btc.Close() + // Create two docs with the same attachment data on the client - v2 attachments intentionally result in two copies stored on the server, despite the client being able to share the data for both. + doc1Body := fmt.Sprintf(`{"greetings":[{"hi": "alice"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc1Version, err := btcRunner.PushRev(btc.id, doc1ID, EmptyDocVersion(), []byte(doc1Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc1ID, doc1Version) + require.NoError(t, err) + + // create doc2 now that we know the server has the attachment - SG should still request the attachment data from the client. 
+ doc2Body := fmt.Sprintf(`{"greetings":[{"howdy": "bob"}],"_attachments":{"%s":{"data":"%s"}}}`, attachmentName, attachmentDataB64) + doc2Version, err := btcRunner.PushRev(btc.id, doc2ID, EmptyDocVersion(), []byte(doc2Body)) + require.NoError(t, err) + + err = btc.rt.WaitForVersion(doc2ID, doc2Version) + require.NoError(t, err) + + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushCount.Value()) + assert.Equal(t, int64(0), btc.rt.GetDatabase().DbStats.CBLReplicationPush().DocPushErrorCount.Value()) + assert.Equal(t, int64(2), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushCount.Value()) + assert.Equal(t, int64(2*len(attachmentData)), btc.rt.GetDatabase().DbStats.CBLReplicationPush().AttachmentPushBytes.Value()) + }) } func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { @@ -278,130 +294,139 @@ func TestBlipPushPullNewAttachmentCommonAncestor(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. 
- bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "2-abc", revId) - - // Wait for the documents to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, - // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 - bodyText = `{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err = btc.PushRevWithHistory(docID, revId, []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(4) - assert.True(t, ok) - - resp = rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - 
require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc,2-abc on the client, with an attachment associated with rev 2. + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "2-abc", revId) + + // Wait for the documents to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + // CBL updates the doc w/ two more revisions, 3-abc, 4-abc, + // these are sent to SG as 4-abc, history:[4-abc,3-abc,2-abc], the attachment has revpos=2 + bodyText = 
`{"greetings":[{"hi":"bob"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err = btcRunner.PushRevWithHistory(btc.id, docID, revId, []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(4) + assert.True(t, ok) + + resp = btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "bob"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. 
+ require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } func TestBlipPushPullNewAttachmentNoCommonAncestor(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - err = btc.StartPull() - assert.NoError(t, err) const docID = "doc1" - - // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. - // rev tree pruning on the CBL side, so 1-abc no longer exists. - // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 - bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - err = btc.StoreRevOnClient(docID, "2-abc", []byte(bodyText)) - require.NoError(t, err) - - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - revId, err := btc.PushRevWithHistory(docID, "2-abc", []byte(bodyText), 2, 0) - require.NoError(t, err) - assert.Equal(t, "4-abc", revId) - - // Wait for the document to be replicated at SG - _, ok := btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - resp := rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") - assert.Equal(t, http.StatusOK, resp.Code) - - var respBody db.Body - assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) - - assert.Equal(t, docID, respBody[db.BodyId]) - assert.Equal(t, "4-abc", respBody[db.BodyRev]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": 
"alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 1) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(4), hello["revpos"]) - assert.True(t, hello["stub"].(bool)) - - // Check the number of sendProveAttachment/sendGetAttachment calls. - require.NotNil(t, btc.pushReplication.replicationStats) - assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) - assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + + // CBL creates revisions 1-abc, 2-abc, 3-abc, 4-abc on the client, with an attachment associated with rev 2. + // rev tree pruning on the CBL side, so 1-abc no longer exists. 
+ // CBL replicates, sends to client as 4-abc history:[4-abc, 3-abc, 2-abc], attachment has revpos=2 + bodyText := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + err = btcRunner.StoreRevOnClient(btc.id, docID, "2-abc", []byte(bodyText)) + require.NoError(t, err) + + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":2,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + revId, err := btcRunner.PushRevWithHistory(btc.id, docID, "2-abc", []byte(bodyText), 2, 0) + require.NoError(t, err) + assert.Equal(t, "4-abc", revId) + + // Wait for the document to be replicated at SG + _, ok := btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + resp := btc.rt.SendAdminRequest(http.MethodGet, "/{{.keyspace}}/"+docID+"?rev="+revId, "") + assert.Equal(t, http.StatusOK, resp.Code) + + var respBody db.Body + assert.NoError(t, base.JSONUnmarshal(resp.Body.Bytes(), &respBody)) + + assert.Equal(t, docID, respBody[db.BodyId]) + assert.Equal(t, "4-abc", respBody[db.BodyRev]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 1) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(4), hello["revpos"]) + assert.True(t, hello["stub"].(bool)) + + // Check the number of sendProveAttachment/sendGetAttachment calls. 
+ require.NotNil(t, btc.pushReplication.replicationStats) + assert.Equal(t, int64(1), btc.pushReplication.replicationStats.GetAttachment.Value()) + assert.Equal(t, int64(0), btc.pushReplication.replicationStats.ProveAttachment.Value()) + }) } // Test Attachment replication behavior described here: https://github.com/couchbase/couchbase-lite-core/wiki/Replication-Protocol @@ -507,163 +532,181 @@ func TestPutAttachmentViaBlipGetViaBlip(t *testing.T) { // TestBlipAttachNameChange tests CBL handling - attachments with changed names are sent as stubs, and not new attachments func TestBlipAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - attachmentA := []byte("attachmentA") - attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) - digest := db.Sha1DigestKey(attachmentA) - - // Push initial attachment data - version, err := client1.PushRev("doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) - require.NoError(t, err) - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attachmentA) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - version, err = client1.PushRev("doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - err = 
rt.WaitForVersion("doc", version) - require.NoError(t, err) - - // Check if attachment is still in bucket - bucketAttachmentA, _, err = rt.GetSingleDataStore().GetRaw(attachmentAKey) - assert.NoError(t, err) - assert.Equal(t, bucketAttachmentA, attachmentA) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attachmentA, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + + attachmentA := []byte("attachmentA") + attachmentAData := base64.StdEncoding.EncodeToString(attachmentA) + digest := db.Sha1DigestKey(attachmentA) + + // Push initial attachment data + version, err := btcRunner.PushRev(client1.id, "doc", EmptyDocVersion(), []byte(`{"key":"val","_attachments":{"attachment": {"data":"`+attachmentAData+`"}}}`)) + require.NoError(t, err) + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(2, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attachmentA) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"","length":11,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + err = client1.rt.WaitForVersion("doc", version) + require.NoError(t, err) + + // Check if attachment is still in bucket + bucketAttachmentA, _, err = client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + assert.NoError(t, err) + assert.Equal(t, 
bucketAttachmentA, attachmentA) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attachmentA, resp.BodyBytes()) + }) } // TestBlipLegacyAttachNameChange ensures that CBL name changes for legacy attachments are handled correctly func TestBlipLegacyAttachNameChange(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, + } - // Create document in the bucket with a legacy attachment - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - // Get the document and grab the revID. 
- docVersion, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, docVersion.RevID, rawDoc) - - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - bucketAttachmentA, _, err := rt.GetSingleDataStore().GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Simulate changing only the attachment name over CBL - // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 - docVersion, err = client1.PushRev("doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", docVersion) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + // Get the document and grab the revID. 
+ docVersion, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, docVersion.RevID, rawDoc) + + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + bucketAttachmentA, _, err := client1.rt.GetSingleDataStore().GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Simulate changing only the attachment name over CBL + // Use revpos 2 to simulate revpos bug in CBL 2.8 - 3.0.0 + docVersion, err = btcRunner.PushRev(client1.id, "doc", docVersion, []byte(`{"key":"val","_attachments":{"attach":{"revpos":2,"content_type":"test/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", docVersion) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", "/{{.keyspace}}/doc/attach", "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + }) } // TestBlipLegacyAttachDocUpdate ensures that CBL updates for documents associated with legacy attachments are handled correctly func TestBlipLegacyAttachDocUpdate(t *testing.T) { - rt := NewRestTester(t, &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() - client1, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client1.Close() base.SetUpTestLogging(t, base.LevelInfo, base.KeySync, base.KeySyncMsg, base.KeyWebSocket, base.KeyWebSocketFrame, base.KeyHTTP, base.KeyCRUD) - - // Create document in the bucket with a legacy attachment. 
Properties here align with rawDocWithAttachmentAndSyncMeta - docID := "doc" - attBody := []byte(`hi`) - digest := db.Sha1DigestKey(attBody) - attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) - attName := "hi.txt" - rawDoc := rawDocWithAttachmentAndSyncMeta() - - // Create a document with legacy attachment. - CreateDocWithLegacyAttachment(t, rt, docID, rawDoc, attKey, attBody) - - version, _ := rt.GetDoc(docID) - - // Store the document and attachment on the test client - err = client1.StoreRevOnClient(docID, version.RevID, rawDoc) - require.NoError(t, err) - client1.AttachmentsLock().Lock() - client1.Attachments()[digest] = attBody - client1.AttachmentsLock().Unlock() - - // Confirm attachment is in the bucket - attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) - dataStore := rt.GetSingleDataStore() - bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) - require.NoError(t, err) - require.EqualValues(t, bucketAttachmentA, attBody) - - // Update the document, leaving body intact - version, err = client1.PushRev("doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) - require.NoError(t, err) - - err = rt.WaitForVersion("doc", version) - require.NoError(t, err) - - resp := rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") - RequireStatus(t, resp, http.StatusOK) - assert.Equal(t, attBody, resp.BodyBytes()) - - // Validate that the attachment hasn't been migrated to V2 - v1Key := db.MakeAttachmentKey(1, "doc", digest) - v1Body, _, err := dataStore.GetRaw(v1Key) - require.NoError(t, err) - require.EqualValues(t, attBody, v1Body) - - v2Key := db.MakeAttachmentKey(2, "doc", digest) - _, _, err = dataStore.GetRaw(v2Key) - require.Error(t, err) - // Confirm correct type of error for both integration test and Walrus - if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { - var keyValueErr *gocb.KeyValueError - 
require.True(t, errors.As(err, &keyValueErr)) - //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) - require.Equal(t, keyValueErr.DocumentID, v2Key) + rtConfig := &RestTesterConfig{ + GuestEnabled: true, } + + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client1.Close() + // Create document in the bucket with a legacy attachment. Properties here align with rawDocWithAttachmentAndSyncMeta + docID := "doc" + attBody := []byte(`hi`) + digest := db.Sha1DigestKey(attBody) + attKey := db.MakeAttachmentKey(db.AttVersion1, docID, digest) + attName := "hi.txt" + rawDoc := rawDocWithAttachmentAndSyncMeta() + + // Create a document with legacy attachment. + CreateDocWithLegacyAttachment(t, client1.rt, docID, rawDoc, attKey, attBody) + + version, _ := client1.rt.GetDoc(docID) + + // Store the document and attachment on the test client + err := btcRunner.StoreRevOnClient(client1.id, docID, version.RevID, rawDoc) + require.NoError(t, err) + btcRunner.AttachmentsLock(client1.id).Lock() + btcRunner.Attachments(client1.id)[digest] = attBody + btcRunner.AttachmentsLock(client1.id).Unlock() + + // Confirm attachment is in the bucket + attachmentAKey := db.MakeAttachmentKey(1, "doc", digest) + dataStore := client1.rt.GetSingleDataStore() + bucketAttachmentA, _, err := dataStore.GetRaw(attachmentAKey) + require.NoError(t, err) + require.EqualValues(t, bucketAttachmentA, attBody) + + // Update the document, leaving body intact + version, err = btcRunner.PushRev(client1.id, "doc", version, []byte(`{"key":"val1","_attachments":{"`+attName+`":{"revpos":2,"content_type":"text/plain","length":2,"stub":true,"digest":"`+digest+`"}}}`)) + require.NoError(t, err) + + err = client1.rt.WaitForVersion("doc", 
version) + require.NoError(t, err) + + resp := client1.rt.SendAdminRequest("GET", fmt.Sprintf("/{{.keyspace}}/doc/%s", attName), "") + RequireStatus(t, resp, http.StatusOK) + assert.Equal(t, attBody, resp.BodyBytes()) + + // Validate that the attachment hasn't been migrated to V2 + v1Key := db.MakeAttachmentKey(1, "doc", digest) + v1Body, _, err := dataStore.GetRaw(v1Key) + require.NoError(t, err) + require.EqualValues(t, attBody, v1Body) + + v2Key := db.MakeAttachmentKey(2, "doc", digest) + _, _, err = dataStore.GetRaw(v2Key) + require.Error(t, err) + // Confirm correct type of error for both integration test and Walrus + if !errors.Is(err, sgbucket.MissingError{Key: v2Key}) { + var keyValueErr *gocb.KeyValueError + require.True(t, errors.As(err, &keyValueErr)) + //require.Equal(t, keyValueErr.StatusCode, memd.StatusKeyNotFound) + require.Equal(t, keyValueErr.DocumentID, v2Key) + } + }) } // TestAttachmentComputeStat: @@ -676,31 +719,33 @@ func TestAttachmentComputeStat(t *testing.T) { rtConfig := RestTesterConfig{ GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - opts := &BlipTesterClientOpts{} - opts.SupportedBLIPProtocols = []string{db.BlipCBMobileReplicationV2} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, opts) - require.NoError(t, err) - defer btc.Close() - syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Create doc revision with attachment on SG. 
- bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) + syncProcessCompute := btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value() - // Wait for the document to be replicated to client. - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) - // assert the attachment read compute stat is incremented - require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + // Create doc revision with attachment on SG. + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := btc.rt.PutDoc(docID, bodyText) + // Wait for the document to be replicated to client. 
+ data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // assert the attachment read compute stat is incremented + require.Greater(t, btc.rt.GetDatabase().DbStats.DatabaseStats.SyncProcessCompute.Value(), syncProcessCompute) + }) } diff --git a/rest/blip_api_collections_test.go b/rest/blip_api_collections_test.go index 7839daa4ee..5663e1227e 100644 --- a/rest/blip_api_collections_test.go +++ b/rest/blip_api_collections_test.go @@ -28,322 +28,344 @@ func TestBlipGetCollections(t *testing.T) { // checkpointIDWithError := "checkpointError" const defaultScopeAndCollection = "_default._default" - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{GuestEnabled: true}, 1) - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, - &BlipTesterClientOpts{ + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 1) + defer rt.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ SkipCollectionsInitialization: true, - }, - ) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - testCases := []struct { - name string - requestBody db.GetCollectionsRequestBody - resultBody []db.Body - errorCode string - }{ - { - 
name: "noDocInDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{defaultScopeAndCollection}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + scopeAndCollection := fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name) + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + testCases := []struct { + name string + requestBody db.GetCollectionsRequestBody + resultBody []db.Body + errorCode string + }{ + { + name: "noDocInDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "mismatchedLengthOnInput", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id", "id2"}, - Collections: []string{defaultScopeAndCollection}, + { + name: "mismatchedLengthOnInput", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id", "id2"}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "inDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{defaultScopeAndCollection}, + { + name: "inDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{defaultScopeAndCollection}, + }, + resultBody: []db.Body{nil}, + 
errorCode: "", }, - resultBody: []db.Body{nil}, - errorCode: "", - }, - { - name: "badScopeSpecificationEmptyString", - // bad scope specification - empty string - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{""}, + { + name: "badScopeSpecificationEmptyString", + // bad scope specification - empty string + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{""}, + }, + resultBody: []db.Body{nil}, + errorCode: fmt.Sprintf("%d", http.StatusBadRequest), }, - resultBody: []db.Body{nil}, - errorCode: fmt.Sprintf("%d", http.StatusBadRequest), - }, - { - name: "presentNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{scopeAndCollection}, + { + name: "presentNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{checkpoint1Body}, + errorCode: "", }, - resultBody: []db.Body{checkpoint1Body}, - errorCode: "", - }, - { - name: "unseenInNonDefaultCollection", - requestBody: db.GetCollectionsRequestBody{ - CheckpointIDs: []string{"id"}, - Collections: []string{scopeAndCollection}, + { + name: "unseenInNonDefaultCollection", + requestBody: db.GetCollectionsRequestBody{ + CheckpointIDs: []string{"id"}, + Collections: []string{scopeAndCollection}, + }, + resultBody: []db.Body{db.Body{}}, + errorCode: "", }, - resultBody: []db.Body{db.Body{}}, - errorCode: "", - }, - // { - // name: "checkpointExistsWithErrorInNonDefaultCollection", - // requestBody: db.GetCollectionsRequestBody{ - // CheckpointIDs: []string{checkpointIDWithError}, - // Collections: []string{scopeAndCollection}, - // }, - // resultBody: []db.Body{nil}, - // errorCode: "", - // }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - 
getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the norev handler - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) - require.Equal(t, errorCode, testCase.errorCode) - if testCase.errorCode != "" { - return - } - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) + // { + // name: "checkpointExistsWithErrorInNonDefaultCollection", + // requestBody: db.GetCollectionsRequestBody{ + // CheckpointIDs: []string{checkpointIDWithError}, + // Collections: []string{scopeAndCollection}, + // }, + // resultBody: []db.Body{nil}, + // errorCode: "", + // }, + } - require.Equal(t, testCase.resultBody, checkpoints) - }) - } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + getCollectionsRequest, err := db.NewGetCollectionsMessage(testCase.requestBody) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the norev handler + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.Equal(t, hasErrorCode, testCase.errorCode != "", "Request returned unexpected error %+v", resp.Properties) + require.Equal(t, errorCode, testCase.errorCode) + if testCase.errorCode != "" { + return + } + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + + require.Equal(t, testCase.resultBody, checkpoints) + }) + } + }) } func 
TestBlipReplicationNoDefaultCollection(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + + subChangesRequest := blip.NewRequest() + subChangesRequest.SetProfile(db.MessageSubChanges) + + require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) + resp := subChangesRequest.Response() + require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - - subChangesRequest := blip.NewRequest() - subChangesRequest.SetProfile(db.MessageSubChanges) - - require.NoError(t, btc.pullReplication.sendMsg(subChangesRequest)) - resp := subChangesRequest.Response() - require.Equal(t, strconv.Itoa(http.StatusBadRequest), resp.Properties[db.BlipErrorCode]) } func TestBlipGetCollectionsAndSetCheckpoint(t *testing.T) { 
base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - checkpointID1 := "checkpoint1" - checkpoint1Body := db.Body{"seq": "123"} - collection := rt.GetSingleTestDatabaseCollection() - revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) - require.NoError(t, err) - checkpoint1RevID := "0-1" - require.Equal(t, checkpoint1RevID, revID) - getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ - CheckpointIDs: []string{checkpointID1}, - Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, - }) - - require.NoError(t, err) - - require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) - - // Check that the response we got back was processed by the GetCollections - resp := getCollectionsRequest.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] - require.False(t, hasErrorCode) - require.Equal(t, errorCode, "") - var checkpoints []db.Body - err = resp.ReadJSONBody(&checkpoints) - require.NoErrorf(t, err, "Actual error %+v", checkpoints) - require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) - - // make sure other functions get called - - requestGetCheckpoint := blip.NewRequest() - requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) - requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 - requestGetCheckpoint.Properties[db.BlipCollection] = "0" - require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) - resp = requestGetCheckpoint.Response() - require.NotNil(t, resp) - errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] - require.Equal(t, errorCode, "") - require.False(t, hasErrorCode) - var checkpoint db.Body - err = resp.ReadJSONBody(&checkpoint) - 
require.NoErrorf(t, err, "Actual error %+v", checkpoint) - - require.Equal(t, db.Body{"seq": "123"}, checkpoint) + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + checkpointID1 := "checkpoint1" + checkpoint1Body := db.Body{"seq": "123"} + collection := btc.rt.GetSingleTestDatabaseCollection() + revID, err := collection.PutSpecial(db.DocTypeLocal, db.CheckpointDocIDPrefix+checkpointID1, checkpoint1Body) + require.NoError(t, err) + checkpoint1RevID := "0-1" + require.Equal(t, checkpoint1RevID, revID) + getCollectionsRequest, err := db.NewGetCollectionsMessage(db.GetCollectionsRequestBody{ + CheckpointIDs: []string{checkpointID1}, + Collections: []string{fmt.Sprintf("%s.%s", collection.ScopeName, collection.Name)}, + }) + require.NoError(t, err) + + require.NoError(t, btc.pushReplication.sendMsg(getCollectionsRequest)) + + // Check that the response we got back was processed by the GetCollections + resp := getCollectionsRequest.Response() + require.NotNil(t, resp) + errorCode, hasErrorCode := resp.Properties[db.BlipErrorCode] + require.False(t, hasErrorCode) + require.Equal(t, errorCode, "") + var checkpoints []db.Body + err = resp.ReadJSONBody(&checkpoints) + require.NoErrorf(t, err, "Actual error %+v", checkpoints) + require.Equal(t, []db.Body{checkpoint1Body}, checkpoints) + + // make sure other functions get called + + requestGetCheckpoint := blip.NewRequest() + requestGetCheckpoint.SetProfile(db.MessageGetCheckpoint) + requestGetCheckpoint.Properties[db.BlipClient] = checkpointID1 + requestGetCheckpoint.Properties[db.BlipCollection] = "0" + require.NoError(t, btc.pushReplication.sendMsg(requestGetCheckpoint)) + resp = requestGetCheckpoint.Response() + require.NotNil(t, resp) + 
errorCode, hasErrorCode = resp.Properties[db.BlipErrorCode] + require.Equal(t, errorCode, "") + require.False(t, hasErrorCode) + var checkpoint db.Body + err = resp.ReadJSONBody(&checkpoint) + require.NoErrorf(t, err, "Actual error %+v", checkpoint) + + require.Equal(t, db.Body{"seq": "123"}, checkpoint) + }) } func TestCollectionsReplication(t *testing.T) { base.TestRequiresCollections(t) - rt := NewRestTester(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }) - defer rt.Close() + } + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - const docID = "doc1" - version := rt.PutDoc(docID, "{}") - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + version := btc.rt.PutDoc(docID, "{}") + require.NoError(t, btc.rt.WaitForPendingChanges()) - btcCollection := btc.SingleCollection() + btcCollection := btcRunner.SingleCollection(btc.id) - err = btcCollection.StartOneshotPull() - require.NoError(t, err) + err := btcCollection.StartOneshotPull() + require.NoError(t, err) - _, ok := btcCollection.WaitForVersion(docID, version) - require.True(t, ok) + _, ok := btcCollection.WaitForVersion(docID, version) + require.True(t, ok) + }) } func TestBlipReplicationMultipleCollections(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + } + btcRunner := NewBlipTesterClientRunner(t) - docName := "doc1" - body := `{"foo":"bar"}` - versions := 
make([]DocVersion, 0, len(rt.GetKeyspaces())) - for _, keyspace := range rt.GetKeyspaces() { - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) - RequireStatus(t, resp, http.StatusCreated) - versions = append(versions, DocVersionFromPutResponse(t, resp)) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() - } - require.NoError(t, rt.WaitForPendingChanges()) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartPull()) - } + docName := "doc1" + body := `{"foo":"bar"}` + versions := make([]DocVersion, 0, len(btc.rt.GetKeyspaces())) + for _, keyspace := range btc.rt.GetKeyspaces() { + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, `{"foo":"bar"}`) + RequireStatus(t, resp, http.StatusCreated) + versions = append(versions, DocVersionFromPutResponse(t, resp)) + } + require.NoError(t, btc.rt.WaitForPendingChanges()) - for i, collectionClient := range btc.collectionClients { - msg, ok := collectionClient.WaitForVersion(docName, versions[i]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartPull()) + } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for i, collectionClient := range btc.collectionClients { + msg, ok := collectionClient.WaitForVersion(docName, versions[i]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := 
collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } func TestBlipReplicationMultipleCollectionsMismatchedDocSizes(t *testing.T) { - rt := NewRestTesterMultipleCollections(t, &RestTesterConfig{ + rtConfig := &RestTesterConfig{ GuestEnabled: true, - }, 2) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - body := `{"foo":"bar"}` - collectionDocIDs := make(map[string][]string) - collectionVersions := make(map[string][]DocVersion) - require.Len(t, rt.GetKeyspaces(), 2) - for i, keyspace := range rt.GetKeyspaces() { - // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one - docCount := 10 - if i == 0 { - docCount = 1 + } + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTesterMultipleCollections(t, rtConfig, 2) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + body := `{"foo":"bar"}` + collectionDocIDs := make(map[string][]string) + collectionVersions := make(map[string][]DocVersion) + require.Len(t, btc.rt.GetKeyspaces(), 2) + for i, keyspace := range btc.rt.GetKeyspaces() { + // intentionally create collections with different size replications to ensure one collection finishing won't cancel another one + docCount := 10 + if i == 0 { + docCount = 1 + } + blipName := btc.rt.getCollectionsForBLIP()[i] + for j := 0; j < docCount; j++ { + docName := fmt.Sprintf("doc%d", j) + resp := btc.rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) + RequireStatus(t, resp, http.StatusCreated) + + version := DocVersionFromPutResponse(t, resp) + collectionVersions[blipName] = append(collectionVersions[blipName], version) + collectionDocIDs[blipName] = 
append(collectionDocIDs[blipName], docName) + } } - blipName := rt.getCollectionsForBLIP()[i] - for j := 0; j < docCount; j++ { - docName := fmt.Sprintf("doc%d", j) - resp := rt.SendAdminRequest(http.MethodPut, "/"+keyspace+"/"+docName, body) - RequireStatus(t, resp, http.StatusCreated) + require.NoError(t, btc.rt.WaitForPendingChanges()) - version := DocVersionFromPutResponse(t, resp) - collectionVersions[blipName] = append(collectionVersions[blipName], version) - collectionDocIDs[blipName] = append(collectionDocIDs[blipName], docName) + // start all the clients first + for _, collectionClient := range btc.collectionClients { + require.NoError(t, collectionClient.StartOneshotPull()) } - } - require.NoError(t, rt.WaitForPendingChanges()) - - // start all the clients first - for _, collectionClient := range btc.collectionClients { - require.NoError(t, collectionClient.StartOneshotPull()) - } - - for _, collectionClient := range btc.collectionClients { - versions := collectionVersions[collectionClient.collection] - docIDs := collectionDocIDs[collectionClient.collection] - msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) - require.True(t, ok) - require.Equal(t, body, string(msg)) - } - for _, collectionClient := range btc.collectionClients { - resp, err := collectionClient.UnsubPullChanges() - assert.NoError(t, err, "Error unsubing: %+v", resp) - } + for _, collectionClient := range btc.collectionClients { + versions := collectionVersions[collectionClient.collection] + docIDs := collectionDocIDs[collectionClient.collection] + msg, ok := collectionClient.WaitForVersion(docIDs[len(docIDs)-1], versions[len(versions)-1]) + require.True(t, ok) + require.Equal(t, body, string(msg)) + } + for _, collectionClient := range btc.collectionClients { + resp, err := collectionClient.UnsubPullChanges() + assert.NoError(t, err, "Error unsubing: %+v", resp) + } + }) } diff --git a/rest/blip_api_crud_test.go b/rest/blip_api_crud_test.go index 
7c041f7cb5..581abbe797 100644 --- a/rest/blip_api_crud_test.go +++ b/rest/blip_api_crud_test.go @@ -1836,64 +1836,73 @@ func TestBlipPullRevMessageHistory(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + client.ClientDeltas = true - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + const docID = "doc1" + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + data, ok := btcRunner.WaitForVersion(client.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - data, ok = client.WaitForVersion(docID, version2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := 
rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version + data, ok = btcRunner.WaitForVersion(client.id, docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + assert.Equal(t, version1.RevID, msg.Properties[db.RevMessageHistory]) // CBG-3268 update to use version + }) } // Reproduces CBG-617 (a client using activeOnly for the initial replication, and then still expecting to get subsequent tombstones afterwards) func TestActiveOnlyContinuous(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) + rtConfig := &RestTesterConfig{GuestEnabled: true} - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - const docID = "doc1" - version := rt.PutDoc(docID, `{"test":true}`) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // start an initial pull - require.NoError(t, btc.StartPullSince("true", "0", "true")) - rev, found := btc.WaitForVersion(docID, version) - assert.True(t, found) - assert.Equal(t, `{"test":true}`, string(rev)) + version := rt.PutDoc(docID, `{"test":true}`) - // delete the doc and make sure the client still gets the tombstone replicated - deletedVersion := rt.DeleteDocReturnVersion(docID, version) + 
// start an initial pull + require.NoError(t, btcRunner.StartPullSince(btc.id, "true", "0", "true")) + rev, found := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, found) + assert.Equal(t, `{"test":true}`, string(rev)) - rev, found = btc.WaitForVersion(docID, deletedVersion) - assert.True(t, found) - assert.Equal(t, `{}`, string(rev)) + // delete the doc and make sure the client still gets the tombstone replicated + deletedVersion := rt.DeleteDocReturnVersion(docID, version) + + rev, found = btcRunner.WaitForVersion(btc.id, docID, deletedVersion) + assert.True(t, found) + assert.Equal(t, `{}`, string(rev)) + }) } // Test that exercises Sync Gateway's norev handler @@ -1901,34 +1910,39 @@ func TestBlipNorev(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} + btcRunner := NewBlipTesterClientRunner(t) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - norevMsg := db.NewNoRevMessage() - norevMsg.SetId("docid") - norevMsg.SetRev("1-a") - norevMsg.SetSequence(db.SequenceID{Seq: 50}) - norevMsg.SetError("404") - norevMsg.SetReason("couldn't send xyz") - btc.addCollectionProperty(norevMsg.Message) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + norevMsg := db.NewNoRevMessage() + norevMsg.SetId("docid") + norevMsg.SetRev("1-a") + norevMsg.SetSequence(db.SequenceID{Seq: 50}) + norevMsg.SetError("404") + norevMsg.SetReason("couldn't send xyz") + btc.addCollectionProperty(norevMsg.Message) - // Couchbase Lite always sends noreply=true for norev messages - // but set to false so we can block waiting for a reply - 
norevMsg.SetNoReply(false) + // Couchbase Lite always sends noreply=true for norev messages + // but set to false so we can block waiting for a reply + norevMsg.SetNoReply(false) - // Request that the handler used to process the message is sent back in the response - norevMsg.Properties[db.SGShowHandler] = "true" + // Request that the handler used to process the message is sent back in the response + norevMsg.Properties[db.SGShowHandler] = "true" - assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) + assert.NoError(t, btc.pushReplication.sendMsg(norevMsg.Message)) - // Check that the response we got back was processed by the norev handler - resp := norevMsg.Response() - assert.NotNil(t, resp) - assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) + // Check that the response we got back was processed by the norev handler + resp := norevMsg.Response() + assert.NotNil(t, resp) + assert.Equal(t, "handleNoRev", resp.Properties[db.SGHandler]) + }) } // TestNoRevSetSeq makes sure the correct string is used with the corresponding function @@ -1949,99 +1963,103 @@ func TestRemovedMessageWithAlternateAccess(t *testing.T) { defer db.SuspendSequenceBatching()() base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &RestTesterConfig{SyncFn: 
channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) - changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) + const docID = "doc" + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = 
btc.WaitForVersion(docID, version) - assert.True(t, ok) + version = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - version = rt.UpdateDoc(docID, version, `{"channels": []}`) - const docMarker = "docmarker" - docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) - assert.Equal(t, "doc", changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) - assert.False(t, changes.Results[0].Revoked) - assert.Equal(t, "docmarker", changes.Results[1].ID) - RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) - assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) - assert.False(t, changes.Results[1].Revoked) - - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docMarker, docMarkerVersion) - assert.True(t, ok) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - messages := btc.pullReplication.GetMessages() + version = rt.UpdateDoc(docID, version, `{"channels": []}`) + const docMarker = "docmarker" + docMarkerVersion := rt.PutDoc(docMarker, `{"channels": ["!"]}`) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() + 
changes, err = rt.WaitForChanges(2, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s&revocations=true", changes.Last_Seq), "user", true) require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + assert.Len(t, changes.Results, 2) + assert.Equal(t, "doc", changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + assert.Equal(t, "3-1bc9dd04c8a257ba28a41eaad90d32de", changes.Results[0].Changes[0]["rev"]) + assert.False(t, changes.Results[0].Revoked) + assert.Equal(t, "docmarker", changes.Results[1].ID) + RequireChangeRevVersion(t, docMarkerVersion, changes.Results[1].Changes[0]) + assert.Equal(t, "1-999bcad4aab47f0a8a24bd9d3598060c", changes.Results[1].Changes[0]["rev"]) + assert.False(t, changes.Results[1].Revoked) + + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docMarker, docMarkerVersion) + assert.True(t, ok) + + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 3) - require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A - require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B - require.Len(t, messageBody[2], 3) + var messageBody []interface{} + err = 
highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 3) + require.Len(t, messageBody[0], 4) // Rev 2 of doc, being sent as removal from channel A + require.Len(t, messageBody[1], 4) // Rev 3 of doc, being sent as removal from channel B + require.Len(t, messageBody[2], 3) - deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - id := messageBody[0].([]interface{})[1] - require.NoError(t, err) - assert.Equal(t, "doc", id) - assert.Equal(t, int64(4), deletedFlags) + deletedFlags, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + id := messageBody[0].([]interface{})[1] + require.NoError(t, err) + assert.Equal(t, "doc", id) + assert.Equal(t, int64(4), deletedFlags) + }) } // TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication tests the following scenario: @@ -2057,91 +2075,95 @@ func TestRemovedMessageWithAlternateAccessAndChannelFilteredReplication(t *testi defer db.SuspendSequenceBatching()() base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) - resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", "B"}, nil)) - RequireStatus(t, resp, http.StatusCreated) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &RestTesterConfig{SyncFn: channels.DocChannelsSyncFunction}) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - require.NoError(t, err) - defer btc.Close() + resp := rt.SendAdminRequest("PUT", "/db/_user/user", GetUserPayload(t, "user", "test", "", collection, []string{"A", 
"B"}, nil)) + RequireStatus(t, resp, http.StatusCreated) - const ( - docID = "doc" - ) - version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + const ( + docID = "doc" + ) + version := rt.PutDoc(docID, `{"channels": ["A", "B"]}`) - err = btc.StartOneshotPull() - assert.NoError(t, err) - _, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) + changes, err := rt.WaitForChanges(1, "/{{.keyspace}}/_changes?since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - version = rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) - require.NoError(t, rt.WaitForPendingChanges()) - // At this point changes should send revocation, as document isn't in any of the user's channels - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Equal(t, 1, len(changes.Results)) - assert.Equal(t, docID, changes.Results[0].ID) - RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) + err = btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(docID, version) - 
assert.True(t, ok) + version = rt.UpdateDoc(docID, version, `{"channels": ["C"]}`) + require.NoError(t, rt.WaitForPendingChanges()) + // At this point changes should send revocation, as document isn't in any of the user's channels + changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Equal(t, 1, len(changes.Results)) + assert.Equal(t, docID, changes.Results[0].ID) + RequireChangeRevVersion(t, version, changes.Results[0].Changes[0]) - _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) - markerID := "docmarker" - markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) - require.NoError(t, rt.WaitForPendingChanges()) + err = btcRunner.StartOneshotPullFiltered(btc.id, "A") + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) - // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received - changes, err = rt.WaitForChanges(1, "/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) - require.NoError(t, err) - assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API - assert.Equal(t, "doc", changes.Results[0].ID) - assert.Equal(t, markerID, changes.Results[1].ID) + _ = rt.UpdateDoc(docID, version, `{"channels": ["B"]}`) + markerID := "docmarker" + markerVersion := rt.PutDoc(markerID, `{"channels": ["A"]}`) + require.NoError(t, rt.WaitForPendingChanges()) - err = btc.StartOneshotPullFiltered("A") - assert.NoError(t, err) - _, ok = btc.WaitForVersion(markerID, markerVersion) - assert.True(t, ok) + // Revocation should not be sent over blip, as document is now in user's channels - only marker document should be received + changes, err = rt.WaitForChanges(1, 
"/{{.keyspace}}/_changes?filter=sync_gateway/bychannel&channels=A&since=0&revocations=true", "user", true) + require.NoError(t, err) + assert.Len(t, changes.Results, 2) // _changes still gets two results, as we don't support 3.0 removal handling over REST API + assert.Equal(t, "doc", changes.Results[0].ID) + assert.Equal(t, markerID, changes.Results[1].ID) - messages := btc.pullReplication.GetMessages() + err = btcRunner.StartOneshotPullFiltered(btc.id, "A") + assert.NoError(t, err) + _, ok = btcRunner.WaitForVersion(btc.id, markerID, markerVersion) + assert.True(t, ok) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - assert.NoError(t, err) - require.Len(t, messageBody, 1) - require.Len(t, messageBody[0], 3) // marker doc - require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) + var messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + assert.NoError(t, err) + require.Len(t, messageBody, 1) + require.Len(t, messageBody[0], 3) // marker doc + require.Equal(t, "docmarker", messageBody[0].([]interface{})[1]) + }) } // 
Make sure that a client cannot open multiple subChanges subscriptions on a single blip context (SG #3222) @@ -2361,54 +2383,58 @@ func TestBlipInternalPropertiesHandling(t *testing.T) { }, } - // Setup - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - }) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + // Setup + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + }) + defer rt.Close() - // Track last sequence for next changes feed - var changes ChangesResults - changes.Last_Seq = "0" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - for i, test := range testCases { - t.Run(test.name, func(t *testing.T) { - docID := fmt.Sprintf("test%d", i) - rawBody, err := json.Marshal(test.inputBody) - require.NoError(t, err) + // Track last sequence for next changes feed + var changes ChangesResults + changes.Last_Seq = "0" - _, err = client.PushRev(docID, EmptyDocVersion(), rawBody) + for i, test := range testCases { + t.Run(test.name, func(t *testing.T) { + docID := fmt.Sprintf("test%d", i) + rawBody, err := json.Marshal(test.inputBody) + require.NoError(t, err) - if test.expectReject { - assert.Error(t, err) - return - } - assert.NoError(t, err) - - // Wait for rev to be received on RT - err = rt.WaitForPendingChanges() - require.NoError(t, err) - changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) - require.NoError(t, err) + _, err = btcRunner.PushRev(client.id, docID, EmptyDocVersion(), rawBody) - var bucketDoc map[string]interface{} - _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) - assert.NoError(t, err) - body := rt.GetDocBody(docID) - // Confirm input body is 
in the bucket doc - if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { - for k, v := range test.inputBody { - assert.Equal(t, v, bucketDoc[k]) - assert.Equal(t, v, body[k]) + if test.expectReject { + assert.Error(t, err) + return } - } - }) - } + assert.NoError(t, err) + + // Wait for rev to be received on RT + err = rt.WaitForPendingChanges() + require.NoError(t, err) + changes, err = rt.WaitForChanges(1, fmt.Sprintf("/{{.keyspace}}/_changes?since=%s", changes.Last_Seq), "", true) + require.NoError(t, err) + + var bucketDoc map[string]interface{} + _, err = rt.GetSingleDataStore().Get(docID, &bucketDoc) + assert.NoError(t, err) + body := rt.GetDocBody(docID) + // Confirm input body is in the bucket doc + if test.skipDocContentsVerification == nil || !*test.skipDocContentsVerification { + for k, v := range test.inputBody { + assert.Equal(t, v, bucketDoc[k]) + assert.Equal(t, v, body[k]) + } + } + }) + } + }) } // CBG-2053: Test that the handleRev stats still increment correctly when going through the processRev function with @@ -2541,120 +2567,129 @@ func TestSendRevisionNoRevHandling(t *testing.T) { expectNoRev: false, }, } - for _, test := range testCases { - t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { - docName := fmt.Sprintf("%s", test.error) - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: true, - CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), - }) - defer rt.Close() - - leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) - require.True(t, ok) - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - - // Change noRev handler so it's known when a noRev is received - recievedNoRevs := make(chan *blip.Message) - btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { - fmt.Println("Received noRev", msg.Properties) - recievedNoRevs <- msg - } - - version := 
rt.PutDoc(docName, `{"foo":"bar"}`) - - // Make the LeakyBucket return an error - leakyDataStore.SetGetRawCallback(func(key string) error { - return test.error - }) - leakyDataStore.SetGetWithXattrCallback(func(key string) error { - return test.error - }) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + for _, test := range testCases { + t.Run(fmt.Sprintf("%s", test.error), func(t *testing.T) { + docName := fmt.Sprintf("%s", test.error) + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: true, + CustomTestBucket: base.GetTestBucket(t).LeakyBucketClone(base.LeakyBucketConfig{}), + }) + defer rt.Close() + + leakyDataStore, ok := base.AsLeakyDataStore(rt.Bucket().DefaultDataStore()) + require.True(t, ok) + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + + // Change noRev handler so it's known when a noRev is received + recievedNoRevs := make(chan *blip.Message) + btc.pullReplication.bt.blipContext.HandlerForProfile[db.MessageNoRev] = func(msg *blip.Message) { + fmt.Println("Received noRev", msg.Properties) + recievedNoRevs <- msg + } - // Flush cache so document has to be retrieved from the leaky bucket - rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + version := rt.PutDoc(docName, `{"foo":"bar"}`) - err = btc.StartPull() - require.NoError(t, err) + // Make the LeakyBucket return an error + leakyDataStore.SetGetRawCallback(func(key string) error { + return test.error + }) + leakyDataStore.SetGetWithXattrCallback(func(key string) error { + return test.error + }) - // Wait 3 seconds for noRev to be received - select { - case msg := <-recievedNoRevs: - if test.expectNoRev { - assert.Equal(t, docName, msg.Properties["id"]) - } else { - require.Fail(t, "Received unexpected noRev message", msg) - } - case <-time.After(3 * time.Second): - if test.expectNoRev { - 
require.Fail(t, "Didn't receive expected noRev") + // Flush cache so document has to be retrieved from the leaky bucket + rt.GetSingleTestDatabaseCollection().FlushRevisionCacheForTest() + + err := btcRunner.StartPull(btc.id) + require.NoError(t, err) + + // Wait 3 seconds for noRev to be received + select { + case msg := <-recievedNoRevs: + if test.expectNoRev { + assert.Equal(t, docName, msg.Properties["id"]) + } else { + require.Fail(t, "Received unexpected noRev message", msg) + } + case <-time.After(3 * time.Second): + if test.expectNoRev { + require.Fail(t, "Didn't receive expected noRev") + } } - } - // Make sure document did not get replicated - _, found := btc.GetVersion(docName, version) - assert.False(t, found) - }) - } + // Make sure document did not get replicated + _, found := btcRunner.GetVersion(btc.id, docName, version) + assert.False(t, found) + }) + } + }) } func TestUnsubChanges(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - rt := NewRestTester(t, &RestTesterConfig{GuestEnabled: true}) - - defer rt.Close() + rtConfig := &RestTesterConfig{GuestEnabled: true} - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() - // Confirm no error message or panic is returned in response - response, err := btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) - - // Sub changes - err = btc.StartPull() - require.NoError(t, err) + btcRunner := NewBlipTesterClientRunner(t) const ( doc1ID = "doc1ID" doc2ID = "doc2ID" ) - doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) - _, found := btc.WaitForVersion(doc1ID, doc1Version) - require.True(t, found) - activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous - require.EqualValues(t, 1, activeReplStat.Value()) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - // Unsub changes - response, err = btc.UnsubPullChanges() 
- assert.NoError(t, err) - assert.Empty(t, response) - // Wait for unsub changes to stop the sub changes being sent before sending document up - base.RequireWaitForStat(t, activeReplStat.Value, 0) - - // Confirm no more changes are being sent - doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) - err = rt.WaitForConditionWithOptions(func() bool { - _, found = btc.GetVersion("doc2", doc2Version) - return found - }, 10, 100) - assert.Error(t, err) - - // Confirm no error message is still returned when no subchanges active - response, err = btc.UnsubPullChanges() - assert.NoError(t, err) - assert.Empty(t, response) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() + // Confirm no error message or panic is returned in response + response, err := btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) - // Confirm the pull replication can be restarted and it syncs doc2 - err = btc.StartPull() - require.NoError(t, err) - _, found = btc.WaitForVersion(doc2ID, doc2Version) - assert.True(t, found) + // Sub changes + err = btcRunner.StartPull(btc.id) + require.NoError(t, err) + + doc1Version := rt.PutDoc(doc1ID, `{"key":"val1"}`) + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, doc1Version) + require.True(t, found) + + activeReplStat := rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplActiveContinuous + require.EqualValues(t, 1, activeReplStat.Value()) + + // Unsub changes + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + // Wait for unsub changes to stop the sub changes being sent before sending document up + base.RequireWaitForStat(t, activeReplStat.Value, 0) + + // Confirm no more changes are being sent + doc2Version := rt.PutDoc(doc2ID, `{"key":"val1"}`) + err = rt.WaitForConditionWithOptions(func() bool { + _, found = btcRunner.GetVersion(btc.id, "doc2", 
doc2Version) + return found + }, 10, 100) + assert.Error(t, err) + + // Confirm no error message is still returned when no subchanges active + response, err = btcRunner.UnsubPullChanges(btc.id) + assert.NoError(t, err) + assert.Empty(t, response) + + // Confirm the pull replication can be restarted and it syncs doc2 + err = btcRunner.StartPull(btc.id) + require.NoError(t, err) + _, found = btcRunner.WaitForVersion(btc.id, doc2ID, doc2Version) + assert.True(t, found) + }) } // TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the replication. @@ -2671,47 +2706,49 @@ func TestRequestPlusPull(t *testing.T) { } }`, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() - - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() + + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Write a document granting user 'bernard' access to PBS - response = 
rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Write a document granting user 'bernard' access to PBS + response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Start a regular one-shot pull - err = client.StartOneshotPullRequestPlus() - assert.NoError(t, err) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPullRequestPlus(client.id) + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } 
// TestRequestPlusPull tests that a one-shot pull replication waits for pending changes when request plus is set on the db config. @@ -2733,47 +2770,50 @@ func TestRequestPlusPullDbConfig(t *testing.T) { }, }, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - database := rt.GetDatabase() - // Initialize blip tester client (will create user) - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - }) - require.NoError(t, err) - defer client.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + database := rt.GetDatabase() - // Put a doc in channel PBS - response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) - RequireStatus(t, response, 201) + // Initialize blip tester client (will create user) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client.Close() - // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped - slowSequence, seqErr := db.AllocateTestSequence(database) - require.NoError(t, seqErr) + // Put a doc in channel PBS + response := rt.SendAdminRequest("PUT", "/{{.keyspace}}/pbs-1", `{"channel":["PBS"]}`) + RequireStatus(t, response, 201) - // Write a document granting user 'bernard' access to PBS - response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) - RequireStatus(t, response, 201) + // Allocate a sequence but do not write a doc for it - will block DCP buffering until sequence is skipped + slowSequence, seqErr := db.AllocateTestSequence(database) + require.NoError(t, seqErr) - caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() + // Write a document granting user 'bernard' access to PBS 
+ response = rt.SendAdminRequest("PUT", "/{{.keyspace}}/grantDoc", `{"accessUser":"bernard", "accessChannel":"PBS"}`) + RequireStatus(t, response, 201) - // Start a regular one-shot pull - err = client.StartOneshotPull() - assert.NoError(t, err) + caughtUpStart := database.DbStats.CBLReplicationPull().NumPullReplTotalCaughtUp.Value() - // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence - require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) + // Start a regular one-shot pull + err := btcRunner.StartOneshotPull(client.id) + assert.NoError(t, err) - // Release the slow sequence - releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) - require.NoError(t, releaseErr) + // Wait for the one-shot changes feed to go into wait mode before releasing the slow sequence + require.NoError(t, database.WaitForTotalCaughtUp(caughtUpStart+1)) - // The one-shot pull should unblock and replicate the document in the granted channel - data, ok := client.WaitForDoc("pbs-1") - assert.True(t, ok) - assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + // Release the slow sequence + releaseErr := db.ReleaseTestSequence(base.TestCtx(t), database, slowSequence) + require.NoError(t, releaseErr) + // The one-shot pull should unblock and replicate the document in the granted channel + data, ok := btcRunner.WaitForDoc(client.id, "pbs-1") + assert.True(t, ok) + assert.Equal(t, `{"channel":["PBS"]}`, string(data)) + }) } // TestBlipRefreshUser makes sure there is no panic if a user gets deleted during a replication @@ -2794,53 +2834,56 @@ func TestBlipRefreshUser(t *testing.T) { rtConfig := RestTesterConfig{ SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - const username = "bernard" - // Initialize blip tester client (will create user) - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bernard", - Channels: []string{"chan1"}, - }) 
+ const docID = "doc1" - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + const username = "bernard" + // Initialize blip tester client (will create user) + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bernard", + Channels: []string{"chan1"}, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // add chan1 explicitly - response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) - RequireStatus(t, response, http.StatusOK) + // add chan1 explicitly + response := rt.SendAdminRequest(http.MethodPut, "/{{.db}}/_user/"+username, GetUserPayload(rt.TB, "", RestTesterDefaultUserPassword, "", rt.GetSingleTestDatabaseCollection(), []string{"chan1"}, nil)) + RequireStatus(t, response, http.StatusOK) - const docID = "doc1" - version := rt.PutDoc(docID, `{"channels":["chan1"]}`) + version := rt.PutDoc(docID, `{"channels":["chan1"]}`) - // Start a regular one-shot pull - err = btc.StartPullSince("true", "0", "false") - require.NoError(t, err) + // Start a regular one-shot pull + err := btcRunner.StartPullSince(btc.id, "true", "0", "false") + require.NoError(t, err) - _, ok := btc.WaitForDoc(docID) - require.True(t, ok) + _, ok := btcRunner.WaitForDoc(btc.id, docID) + require.True(t, ok) - _, ok = btc.GetVersion(docID, version) - require.True(t, ok) + _, ok = btcRunner.GetVersion(btc.id, docID, version) + require.True(t, ok) - // delete user with an active blip connection - response = rt.SendAdminRequest(http.MethodDelete, "/{{.db}}/_user/"+username, "") - RequireStatus(t, response, http.StatusOK) + // delete user with an active blip connection + response = rt.SendAdminRequest(http.MethodDelete, 
"/{{.db}}/_user/"+username, "") + RequireStatus(t, response, http.StatusOK) - require.NoError(t, rt.WaitForPendingChanges()) + require.NoError(t, rt.WaitForPendingChanges()) - // further requests will 500, but shouldn't panic - unsubChangesRequest := blip.NewRequest() - unsubChangesRequest.SetProfile(db.MessageUnsubChanges) - btc.addCollectionProperty(unsubChangesRequest) + // further requests will 500, but shouldn't panic + unsubChangesRequest := blip.NewRequest() + unsubChangesRequest.SetProfile(db.MessageUnsubChanges) + btc.addCollectionProperty(unsubChangesRequest) - err = btc.pullReplication.sendMsg(unsubChangesRequest) - require.NoError(t, err) + err = btc.pullReplication.sendMsg(unsubChangesRequest) + require.NoError(t, err) - testResponse := unsubChangesRequest.Response() - require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) - body, err := testResponse.Body() - require.NoError(t, err) - require.NotContains(t, string(body), "Panic:") + testResponse := unsubChangesRequest.Response() + require.Equal(t, strconv.Itoa(db.CBLReconnectErrorCode), testResponse.Properties[db.BlipErrorCode]) + body, err := testResponse.Body() + require.NoError(t, err) + require.NotContains(t, string(body), "Panic:") + }) } diff --git a/rest/blip_api_delta_sync_test.go b/rest/blip_api_delta_sync_test.go index 74651c909e..1bb5e7aaaa 100644 --- a/rest/blip_api_delta_sync_test.go +++ b/rest/blip_api_delta_sync_test.go @@ -30,60 +30,63 @@ func TestBlipDeltaSyncPushAttachment(t *testing.T) { if !base.IsEnterpriseEdition() { t.Skip("Delta test requires EE") } + rtConfig := &RestTesterConfig{ + DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ + DeltaSync: &DeltaSyncConfig{ + Enabled: base.BoolPtr(true), + }, + }}, + GuestEnabled: true, + } const docID = "pushAttachmentDoc" - rt := NewRestTester(t, - &RestTesterConfig{ - DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ - DeltaSync: &DeltaSyncConfig{ - Enabled: base.BoolPtr(true), - }, - }}, 
- GuestEnabled: true, - }) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, rtConfig) + defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Push first rev - version, err := btc.PushRev(docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) - require.NoError(t, err) + // Push first rev + version, err := btcRunner.PushRev(btc.id, docID, EmptyDocVersion(), []byte(`{"key":"val"}`)) + require.NoError(t, err) - // Push second rev with an attachment (no delta yet) - attData := base64.StdEncoding.EncodeToString([]byte("attach")) + // Push second rev with an attachment (no delta yet) + attData := base64.StdEncoding.EncodeToString([]byte("attach")) - version, err = btc.PushRev(docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) - require.NoError(t, err) + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(`{"key":"val","_attachments":{"myAttachment":{"data":"`+attData+`"}}}`)) + require.NoError(t, err) - syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err := rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found := syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found := syncData.Attachments["myAttachment"] + assert.True(t, found) - // Turn deltas on - btc.ClientDeltas = true + // Turn deltas on + btc.ClientDeltas = true - // Get existing body with the stub attachment, insert a new property and push as delta. 
- body, found := btc.GetVersion(docID, version) - require.True(t, found) + // Get existing body with the stub attachment, insert a new property and push as delta. + body, found := btcRunner.GetVersion(btc.id, docID, version) + require.True(t, found) - newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) - require.NoError(t, err) + newBody, err := base.InjectJSONPropertiesFromBytes(body, base.KVPairBytes{Key: "update", Val: []byte(`true`)}) + require.NoError(t, err) - _, err = btc.PushRev(docID, version, newBody) - require.NoError(t, err) + _, err = btcRunner.PushRev(btc.id, docID, version, newBody) + require.NoError(t, err) - syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) - require.NoError(t, err) + syncData, err = rt.GetSingleTestDatabaseCollection().GetDocSyncData(base.TestCtx(t), docID) + require.NoError(t, err) - assert.Len(t, syncData.Attachments, 1) - _, found = syncData.Attachments["myAttachment"] - assert.True(t, found) + assert.Len(t, syncData.Attachments, 1) + _, found = syncData.Attachments["myAttachment"] + assert.True(t, found) + }) } // Test pushing and pulling new attachments through delta sync @@ -106,59 +109,63 @@ func TestBlipDeltaSyncPushPullNewAttachment(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer btc.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - btc.ClientDeltas = true - err = btc.StartPull() - assert.NoError(t, err) - const docID = "doc1" + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer btc.Close() - // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an 
attachment - bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` - version := rt.PutDoc(docID, bodyText) - data, ok := btc.WaitForVersion(docID, version) - assert.True(t, ok) - - bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` - require.JSONEq(t, bodyTextExpected, string(data)) - - // Update the replicated doc at client by adding another attachment. - bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` - version, err = btc.PushRev(docID, version, []byte(bodyText)) - require.NoError(t, err) - - // Wait for the document to be replicated at SG - _, ok = btc.pushReplication.WaitForMessage(2) - assert.True(t, ok) - - respBody := rt.GetDocVersion(docID, version) - - assert.Equal(t, docID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 1) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) - - attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, attachments, 2) - hello, ok := attachments["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(1), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - world, ok := attachments["world.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) - assert.Equal(t, float64(11), world["length"]) - assert.Equal(t, float64(2), world["revpos"]) - assert.Equal(t, true, world["stub"]) + btc.ClientDeltas = true + err := btcRunner.StartPull(btc.id) + assert.NoError(t, err) + const docID = 
"doc1" + + // Create doc1 rev 1-77d9041e49931ceef58a1eef5fd032e8 on SG with an attachment + bodyText := `{"greetings":[{"hi": "alice"}],"_attachments":{"hello.txt":{"data":"aGVsbG8gd29ybGQ="}}}` + version := rt.PutDoc(docID, bodyText) + data, ok := btcRunner.WaitForVersion(btc.id, docID, version) + assert.True(t, ok) + + bodyTextExpected := `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="}}}` + require.JSONEq(t, bodyTextExpected, string(data)) + + // Update the replicated doc at client by adding another attachment. + bodyText = `{"greetings":[{"hi":"alice"}],"_attachments":{"hello.txt":{"revpos":1,"length":11,"stub":true,"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0="},"world.txt":{"data":"bGVsbG8gd29ybGQ="}}}` + version, err = btcRunner.PushRev(btc.id, docID, version, []byte(bodyText)) + require.NoError(t, err) + + // Wait for the document to be replicated at SG + _, ok = btc.pushReplication.WaitForMessage(2) + assert.True(t, ok) + + respBody := rt.GetDocVersion(docID, version) + + assert.Equal(t, docID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 1) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[0]) + + attachments, ok := respBody[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, attachments, 2) + hello, ok := attachments["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(1), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + world, ok := attachments["world.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-qiF39gVoGPFzpRQkNYcY9u3wx9Y=", world["digest"]) + assert.Equal(t, float64(11), world["length"]) + assert.Equal(t, float64(2), world["revpos"]) + assert.Equal(t, true, 
world["stub"]) + }) } // TestBlipDeltaSyncNewAttachmentPull tests that adding a new attachment in SG and replicated via delta sync adds the attachment @@ -175,84 +182,88 @@ func TestBlipDeltaSyncNewAttachmentPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const doc1ID = "doc1" - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - const doc1ID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment - version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) - - data, ok = client.WaitForVersion(doc1ID, version) - assert.True(t, ok) - var dataMap map[string]interface{} - assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) - atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) - require.True(t, ok) - assert.Len(t, atts, 1) - hello, ok := atts["hello.txt"].(map[string]interface{}) - require.True(t, ok) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, 
hello["stub"]) - - // message #3 is the getAttachment message that is sent in-between rev processing - msg, ok := client.pullReplication.WaitForMessage(3) - assert.True(t, ok) - assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") - - // Check EE is delta, and CE is full-body replication - // msg, ok = client.pullReplication.WaitForMessage(5) - msg, ok = client.WaitForBlipRevMessage(doc1ID, version) - assert.True(t, ok) - - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) - assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) - assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) - } - respBody := rt.GetDocVersion(doc1ID, version) - assert.Equal(t, doc1ID, respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 2) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) - atts = respBody[db.BodyAttachments].(map[string]interface{}) 
- assert.Len(t, atts, 1) - assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) - assert.Equal(t, float64(11), hello["length"]) - assert.Equal(t, float64(2), hello["revpos"]) - assert.Equal(t, true, hello["stub"]) - - // assert.Equal(t, `{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(doc1ID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-10000d5ec533b29b117e60274b1e3653 on SG with the first attachment + version = rt.UpdateDoc(doc1ID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}], "_attachments": {"hello.txt": {"data":"aGVsbG8gd29ybGQ="}}}`) + + data, ok = btcRunner.WaitForVersion(client.id, doc1ID, version) + assert.True(t, ok) + var dataMap map[string]interface{} + assert.NoError(t, base.JSONUnmarshal(data, &dataMap)) + atts, ok := dataMap[db.BodyAttachments].(map[string]interface{}) + require.True(t, ok) + assert.Len(t, atts, 1) + hello, ok := atts["hello.txt"].(map[string]interface{}) + require.True(t, ok) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // message #3 is the getAttachment message that is sent in-between rev processing + msg, ok := client.pullReplication.WaitForMessage(3) + assert.True(t, ok) + assert.NotEqual(t, blip.ErrorType, msg.Type(), "Expected non-error blip message type") + + // Check EE is delta, and CE is full-body replication + // msg, ok = 
client.pullReplication.WaitForMessage(5) + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, doc1ID, version) + assert.True(t, ok) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"_attachments":[{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}]}`, string(msgBody)) + assert.Contains(t, string(msgBody), `"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}}`) + assert.Contains(t, string(msgBody), `"greetings":[{"hello":"world!"},{"hi":"alice"}]`) + } + + respBody := rt.GetDocVersion(doc1ID, version) + assert.Equal(t, doc1ID, respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 2) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + atts = respBody[db.BodyAttachments].(map[string]interface{}) + assert.Len(t, atts, 1) + assert.Equal(t, "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=", hello["digest"]) + assert.Equal(t, float64(11), hello["length"]) + assert.Equal(t, float64(2), hello["revpos"]) + assert.Equal(t, true, hello["stub"]) + + // assert.Equal(t, 
`{"_attachments":{"hello.txt":{"digest":"sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=","length":11,"revpos":2,"stub":true}},"_id":"doc1","_rev":"2-10000d5ec533b29b117e60274b1e3653","greetings":[{"hello":"world!"},{"hi":"alice"}]}`, resp.Body.String()) + }) } // TestBlipDeltaSyncPull tests that a simple pull replication uses deltas in EE, @@ -262,7 +273,7 @@ func TestBlipDeltaSyncPull(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ Enabled: &sgUseDeltas, @@ -270,66 +281,67 @@ func TestBlipDeltaSyncPull(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - var deltaSentCount int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version = rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + var deltaSentCount int64 + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaSentCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + 
} - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - // Check EE is delta, and CE is full-body replication - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) - var afterDeltaSyncCount int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 
2-959f0e9ad32d84ff652fb91d8d0caa7e + version = rt.UpdateDoc(docID, version, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version) + assert.True(t, ok) + + // Check EE is delta, and CE is full-body replication + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + + var afterDeltaSyncCount int64 + if rt.GetDatabase().DbStats.DeltaSync() != nil { + afterDeltaSyncCount = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + assert.Equal(t, deltaSentCount, afterDeltaSyncCount) } - - assert.Equal(t, deltaSentCount, afterDeltaSyncCount) - } + }) } // TestBlipDeltaSyncPullResend tests that a simple pull replication that uses a delta a client rejects will resend the revision in full. 
@@ -349,58 +361,61 @@ func TestBlipDeltaSyncPullResend(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - docID := "doc1" - // create doc1 rev 1 - docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - // reject deltas built ontop of rev 1 - client.rejectDeltasForSrcRev = docVersion1.RevID - - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) - data, ok := client.WaitForVersion(docID, docVersion1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2 - docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) - - data, ok = client.WaitForVersion(docID, docVersion2) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - - // Check the request was initially sent with the correct deltaSrc property - assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, deltaSentCount+1, rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) - - msg, ok = client.WaitForBlipRevMessage(docID, docVersion2) - assert.True(t, ok) - - // Check the resent request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err = msg.Body() - 
assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + + docID := "doc1" + // create doc1 rev 1 + docVersion1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + deltaSentCount := rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + + // reject deltas built ontop of rev 1 + client.rejectDeltasForSrcRev = docVersion1.RevID + + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + data, ok := btcRunner.WaitForVersion(client.id, docID, docVersion1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2 + docVersion2 := rt.UpdateDoc(docID, docVersion1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": 12345678901234567890}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, docVersion2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + + // Check the request was initially sent with the correct deltaSrc property + assert.Equal(t, docVersion1.RevID, msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, deltaSentCount+1, 
rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value()) + + msg, ok = btcRunner.WaitForBlipRevMessage(client.id, docID, docVersion2) + assert.True(t, ok) + + // Check the resent request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err = msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":12345678901234567890}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":12345678901234567890}]}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullRemoved tests a simple pull replication that drops a document out of the user's channel. @@ -419,43 +434,47 @@ func TestBlipDeltaSyncPullRemoved(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, - }) - require.NoError(t, err) - defer client.Close() + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.SkipVersionVectorInitialization = true // v2 protocol test + const docID = "doc1" - err = client.StartPull() - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() - const docID = "doc1" - // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 - version = rt.UpdateDoc(docID, version, `{"channels": ["private"], 
"greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"_removed":true}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"_removed":true}`, string(msgBody)) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: []string{db.BlipCBMobileReplicationV2}, + }) + defer client.Close() + + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-1513b53e2738671e634d9dd111f48de0 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // create doc1 rev 2-ff91e11bc1fd12bbb4815a06571859a9 + version = rt.UpdateDoc(docID, version, `{"channels": ["private"], "greetings": [{"hello": "world!"}, {"hi": "bob"}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"_removed":true}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"_removed":true}`, string(msgBody)) + }) } // TestBlipDeltaSyncPullTombstoned tests a simple pull replication that deletes a document. 
@@ -473,7 +492,7 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{ + rtConfig := &RestTesterConfig{ DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ DeltaSync: &DeltaSyncConfig{ @@ -483,78 +502,82 @@ func TestBlipDeltaSyncPullTombstoned(t *testing.T) { }, SyncFn: channels.DocChannelsSyncFunction, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + btcRunner := NewBlipTesterClientRunner(t) var deltaCacheHitsStart int64 var deltaCacheMissesStart int64 var deltasRequestedStart int64 var deltasSentStart int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() - client, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "alice", - Channels: []string{"public"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } - err = client.StartPull() - assert.NoError(t, err) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "alice", + Channels: []string{"public"}, + ClientDeltas: true, + SupportedBLIPProtocols: 
SupportedBLIPProtocols, + }) + defer client.Close() - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - - msg, ok := client.pullReplication.WaitForMessage(5) - assert.True(t, ok) - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{}`, string(msgBody)) - assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) - - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 - - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta - } else { - assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - 
} + const docID = "doc1" + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + + msg, ok := client.pullReplication.WaitForMessage(5) + assert.True(t, ok) + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{}`, string(msgBody)) + assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+1, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) // "_removed" docs are not counted as a delta + } else { + assert.Equal(t, deltaCacheHitsStart, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullTombstonedStarChan tests two clients can perform a 
simple pull replication that deletes a document when the user has access to the star channel. @@ -576,129 +599,133 @@ func TestBlipDeltaSyncPullTombstonedStarChan(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyHTTP, base.KeyCache, base.KeySync, base.KeySyncMsg) sgUseDeltas := base.IsEnterpriseEdition() - rtConfig := RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + rtConfig := &RestTesterConfig{DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{DeltaSync: &DeltaSyncConfig{Enabled: &sgUseDeltas}}}} + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - var deltaCacheHitsStart int64 - var deltaCacheMissesStart int64 - var deltasRequestedStart int64 - var deltasSentStart int64 + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + rtConfig) + defer rt.Close() - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } - client1, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client1", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client1.Close() + var deltaCacheHitsStart int64 + var deltaCacheMissesStart int64 + var deltasRequestedStart int64 + var deltasSentStart int64 - client2, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "client2", - Channels: []string{"*"}, - ClientDeltas: true, - }) - require.NoError(t, err) - defer client2.Close() + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsStart = 
rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesStart = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedStart = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentStart = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + client1 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client1", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client1.Close() - err = client1.StartPull() - require.NoError(t, err) + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "client2", + Channels: []string{"*"}, + ClientDeltas: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer client2.Close() - const docID = "doc1" - // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 - version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - - data, ok := client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // Have client2 get only rev-1 and then stop replicating - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Contains(t, string(data), `"channels":["public"]`) - assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - - // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc - version = rt.DeleteDocReturnVersion(docID, version) - - data, ok = client1.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok := client1.WaitForBlipRevMessage(docID, version) // docid, revid to get the message - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - 
msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - msgBody, err := msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client1.pullReplication.GetMessages()) - } + err := btcRunner.StartPull(client1.id) + require.NoError(t, err) - // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) - msg, ok = client2.WaitForBlipRevMessage(docID, version) - assert.True(t, ok) - - if !assert.Equal(t, db.MessageRev, msg.Profile()) { - t.Logf("unexpected profile for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - msgBody, err = msg.Body() - assert.NoError(t, err) - if !assert.Equal(t, `{}`, string(msgBody)) { - t.Logf("unexpected body for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } - if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { - t.Logf("unexpected deleted property for message %v in %v", - msg.SerialNumber(), client2.pullReplication.GetMessages()) - } + // create doc1 rev 1-e89945d756a1d444fa212bffbbb31941 + version := rt.PutDoc(docID, `{"channels": ["public"], "greetings": [{"hello": "world!"}]}`) - var deltaCacheHitsEnd int64 - var deltaCacheMissesEnd int64 - var deltasRequestedEnd int64 - var deltasSentEnd int64 + data, ok := btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) - if 
rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() - deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() - } + // Have client2 get only rev-1 and then stop replicating + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Contains(t, string(data), `"channels":["public"]`) + assert.Contains(t, string(data), `"greetings":[{"hello":"world!"}]`) + + // tombstone doc1 at rev 2-2db70833630b396ef98a3ec75b3e90fc + version = rt.DeleteDocReturnVersion(docID, version) + + data, ok = btcRunner.WaitForVersion(client1.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client1.id, docID, version) // docid, revid to get the message + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + msgBody, err := msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client1.pullReplication.GetMessages()) + } - if sgUseDeltas { - assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) - assert.Equal(t, deltasSentStart+2, deltasSentEnd) - } else { - assert.Equal(t, deltaCacheHitsStart, 
deltaCacheHitsEnd) - assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) - assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) - assert.Equal(t, deltasSentStart, deltasSentEnd) - } + // Sync Gateway will have cached the tombstone delta, so client 2 should be able to retrieve it from the cache + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) + msg, ok = btcRunner.WaitForBlipRevMessage(client2.id, docID, version) + assert.True(t, ok) + + if !assert.Equal(t, db.MessageRev, msg.Profile()) { + t.Logf("unexpected profile for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + msgBody, err = msg.Body() + assert.NoError(t, err) + if !assert.Equal(t, `{}`, string(msgBody)) { + t.Logf("unexpected body for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + if !assert.Equal(t, "1", msg.Properties[db.RevMessageDeleted]) { + t.Logf("unexpected deleted property for message %v in %v", + msg.SerialNumber(), client2.pullReplication.GetMessages()) + } + + var deltaCacheHitsEnd int64 + var deltaCacheMissesEnd int64 + var deltasRequestedEnd int64 + var deltasSentEnd int64 + + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaCacheHitsEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMissesEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + deltasRequestedEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasRequested.Value() + deltasSentEnd = rt.GetDatabase().DbStats.DeltaSync().DeltasSent.Value() + } + + if sgUseDeltas { + assert.Equal(t, deltaCacheHitsStart+1, deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart+1, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart+2, deltasRequestedEnd) + assert.Equal(t, deltasSentStart+2, deltasSentEnd) + } else { + assert.Equal(t, deltaCacheHitsStart, 
deltaCacheHitsEnd) + assert.Equal(t, deltaCacheMissesStart, deltaCacheMissesEnd) + assert.Equal(t, deltasRequestedStart, deltasRequestedEnd) + assert.Equal(t, deltasSentStart, deltasSentEnd) + } + }) } // TestBlipDeltaSyncPullRevCache tests that a simple pull replication uses deltas in EE, @@ -720,79 +747,80 @@ func TestBlipDeltaSyncPullRevCache(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() + const docID = "doc1" + btcRunner := NewBlipTesterClientRunner(t) - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} - client.ClientDeltas = true - err = client.StartPull() - assert.NoError(t, err) + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - const docID = "doc1" - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // Perform a one-shot pull as client 2 to pull down the first revision - - client2, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client2.Close() - - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - data, ok = client2.WaitForVersion(docID, version1) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - - // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e - version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) - - data, ok = client.WaitForVersion(docID, version2) - 
assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) - msg, ok := client.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check EE is delta - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - - deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version - client2.ClientDeltas = true - err = client2.StartOneshotPull() - assert.NoError(t, err) - msg2, ok := client2.WaitForBlipRevMessage(docID, version2) - assert.True(t, ok) - - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody2, err := msg2.Body() - assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) - - updatedDeltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() - updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() - - assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) - assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + client.ClientDeltas = true + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version1 := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version1) + assert.True(t, ok) + 
assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // Perform a one-shot pull as client 2 to pull down the first revision + client2 := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client2.Close() + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + data, ok = btcRunner.WaitForVersion(client2.id, docID, version1) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + + // create doc1 rev 2-959f0e9ad32d84ff652fb91d8d0caa7e + version2 := rt.UpdateDoc(docID, version1, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}, {"howdy": "bob"}]}`) + + data, ok = btcRunner.WaitForVersion(client.id, docID, version2) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(data)) + msg, ok := btcRunner.WaitForBlipRevMessage(client.id, docID, version2) + assert.True(t, ok) + + // Check EE is delta + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + deltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + deltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + // Run another one shot pull to get the 2nd revision - validate it comes as delta, and uses cached version + client2.ClientDeltas = true + err = btcRunner.StartOneshotPull(client2.id) + assert.NoError(t, err) + msg2, ok := btcRunner.WaitForBlipRevMessage(client2.id, docID, version2) + assert.True(t, ok) + + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg2.Properties[db.RevMessageDeltaSrc]) + // Check the 
request body was the actual delta + msgBody2, err := msg2.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody2)) + + updatedDeltaCacheHits := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheHit.Value() + updatedDeltaCacheMisses := rt.GetDatabase().DbStats.DeltaSync().DeltaCacheMiss.Value() + + assert.Equal(t, deltaCacheHits+1, updatedDeltaCacheHits) + assert.Equal(t, deltaCacheMisses, updatedDeltaCacheMisses) + }) } // TestBlipDeltaSyncPush tests that a simple push replication handles deltas in EE, @@ -809,96 +837,100 @@ func TestBlipDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - client.ClientDeltas = true - - err = client.StartPull() - assert.NoError(t, err) - - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + btcRunner := NewBlipTesterClientRunner(t) const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abc on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - // Check EE is delta, and CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + 
client.ClientDeltas = true - if base.IsEnterpriseEdition() { - // Check the request was sent with the correct deltaSrc property - assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was the actual delta - msgBody, err := msg.Body() + err := btcRunner.StartPull(client.id) assert.NoError(t, err) - assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - // Validate that generation of a delta didn't mutate the revision body in the revision cache - docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) - assert.NoError(t, cacheErr) - assert.NotContains(t, docRev.BodyBytes, "bob") - } else { - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abc on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - } - respBody := rt.GetDocVersion(docID, newRev) - assert.Equal(t, "doc1", respBody[db.BodyId]) - greetings := respBody["greetings"].([]interface{}) - assert.Len(t, greetings, 3) - assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) - assert.Equal(t, 
map[string]interface{}{"hi": "alice"}, greetings[1]) - assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + if base.IsEnterpriseEdition() { + // Check the request was sent with the correct deltaSrc property + assert.Equal(t, "1-0335a345b6ffed05707ccc4cbc1b67f4", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was the actual delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.Equal(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + + // Validate that generation of a delta didn't mutate the revision body in the revision cache + docRev, cacheErr := rt.GetSingleTestDatabaseCollection().GetRevisionCacheForTest().Get(base.TestCtx(t), "doc1", "1-0335a345b6ffed05707ccc4cbc1b67f4", db.RevCacheOmitBody, db.RevCacheOmitDelta) + assert.NoError(t, cacheErr) + assert.NotContains(t, docRev.BodyBytes, "bob") + } else { + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + } - // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) - deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) + respBody := rt.GetDocVersion(docID, newRev) + assert.Equal(t, "doc1", respBody[db.BodyId]) + greetings := respBody["greetings"].([]interface{}) + assert.Len(t, greetings, 3) + assert.Equal(t, map[string]interface{}{"hello": "world!"}, greetings[0]) + assert.Equal(t, map[string]interface{}{"hi": "alice"}, greetings[1]) + assert.Equal(t, map[string]interface{}{"howdy": "bob"}, greetings[2]) - data, ok = 
client.WaitForVersion(docID, deletedVersion) - assert.True(t, ok) - assert.Equal(t, `{}`, string(data)) + // tombstone doc1 (gets rev 3-f3be6c85e0362153005dae6f08fc68bb) + deletedVersion := rt.DeleteDocReturnVersion(docID, newRev) - var deltaPushDocCountStart int64 + data, ok = btcRunner.WaitForVersion(client.id, docID, deletedVersion) + assert.True(t, ok) + assert.Equal(t, `{}`, string(data)) - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } + var deltaPushDocCountStart int64 - _, err = client.PushRev(docID, deletedVersion, []byte(`{"undelete":true}`)) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountStart = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } - if base.IsEnterpriseEdition() { - // Now make the client push up a delta that has the parent of the tombstone. - // This is not a valid scenario, and is actively prevented on the CBL side. - assert.Error(t, err) - assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") - } else { - // Pushing a full body revision on top of a tombstone is valid. - // CBL clients should fall back to this. The test client doesn't. - assert.NoError(t, err) - } + _, err = btcRunner.PushRev(client.id, docID, deletedVersion, []byte(`{"undelete":true}`)) + + if base.IsEnterpriseEdition() { + // Now make the client push up a delta that has the parent of the tombstone. + // This is not a valid scenario, and is actively prevented on the CBL side. + assert.Error(t, err) + assert.Contains(t, err.Error(), "Can't use delta. Found tombstone for doc") + } else { + // Pushing a full body revision on top of a tombstone is valid. + // CBL clients should fall back to this. The test client doesn't. 
+ assert.NoError(t, err) + } - var deltaPushDocCountEnd int64 + var deltaPushDocCountEnd int64 - if rt.GetDatabase().DbStats.DeltaSync() != nil { - deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() - } - assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + if rt.GetDatabase().DbStats.DeltaSync() != nil { + deltaPushDocCountEnd = rt.GetDatabase().DbStats.DeltaSync().DeltaPushDocCount.Value() + } + assert.Equal(t, deltaPushDocCountStart, deltaPushDocCountEnd) + }) } // TestBlipNonDeltaSyncPush tests that a client that doesn't support deltas can push to a SG that supports deltas (either CE or EE) @@ -914,41 +946,45 @@ func TestBlipNonDeltaSyncPush(t *testing.T) { }}, GuestEnabled: true, } - rt := NewRestTester(t, - &rtConfig) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc1" - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &rtConfig) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() - client.ClientDeltas = false - err = client.StartPull() - assert.NoError(t, err) + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() - // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 - const docID = "doc1" - version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) - - data, ok := client.WaitForVersion(docID, version) - assert.True(t, ok) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) - // create doc1 rev 2-abcxyz on client - newRev, err := client.PushRev(docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) - assert.NoError(t, err) - // Check EE is delta, and 
CE is full-body replication - msg, found := client.waitForReplicationMessage(collection, 2) - assert.True(t, found) - - // Check the request was NOT sent with a deltaSrc property - assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) - // Check the request body was NOT the delta - msgBody, err := msg.Body() - assert.NoError(t, err) - assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) - assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) - - body := rt.GetDocVersion("doc1", newRev) - require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + client.ClientDeltas = false + err := btcRunner.StartPull(client.id) + assert.NoError(t, err) + + // create doc1 rev 1-0335a345b6ffed05707ccc4cbc1b67f4 + version := rt.PutDoc(docID, `{"greetings": [{"hello": "world!"}, {"hi": "alice"}]}`) + + data, ok := btcRunner.WaitForVersion(client.id, docID, version) + assert.True(t, ok) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"}]}`, string(data)) + // create doc1 rev 2-abcxyz on client + newRev, err := btcRunner.PushRev(client.id, docID, version, []byte(`{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`)) + assert.NoError(t, err) + // Check EE is delta, and CE is full-body replication + msg, found := client.waitForReplicationMessage(collection, 2) + assert.True(t, found) + + // Check the request was NOT sent with a deltaSrc property + assert.Equal(t, "", msg.Properties[db.RevMessageDeltaSrc]) + // Check the request body was NOT the delta + msgBody, err := msg.Body() + assert.NoError(t, err) + assert.NotEqual(t, `{"greetings":{"2-":[{"howdy":"bob"}]}}`, string(msgBody)) + assert.Equal(t, `{"greetings":[{"hello":"world!"},{"hi":"alice"},{"howdy":"bob"}]}`, string(msgBody)) + + body := rt.GetDocVersion("doc1", newRev) + require.Equal(t, "bob", body["greetings"].([]interface{})[2].(map[string]interface{})["howdy"]) + }) } diff --git 
a/rest/blip_api_no_race_test.go b/rest/blip_api_no_race_test.go index f6e35f9cf1..70688eb559 100644 --- a/rest/blip_api_no_race_test.go +++ b/rest/blip_api_no_race_test.go @@ -44,65 +44,68 @@ func TestBlipPusherUpdateDatabase(t *testing.T) { GuestEnabled: true, CustomTestBucket: tb.NoCloseClone(), } - rt := NewRestTester(t, &rtConfig) - defer rt.Close() - - client, err := NewBlipTesterClientOptsWithRT(t, rt, nil) - require.NoError(t, err) - defer client.Close() - - var lastPushRevErr atomic.Value - - // Wait for the background updates to finish at the end of the test - shouldCreateDocs := base.NewAtomicBool(true) - wg := sync.WaitGroup{} - wg.Add(1) - defer func() { - shouldCreateDocs.Set(false) - wg.Wait() - }() - - // Start the test client creating and pushing documents in the background - go func() { - for i := 0; shouldCreateDocs.IsTrue(); i++ { - // this will begin to error when the database is reloaded underneath the replication - _, err := client.PushRev(fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) - if err != nil { - lastPushRevErr.Store(err) - } - } - _ = rt.WaitForPendingChanges() - wg.Done() - }() - - // and wait for a few to be done before we proceed with updating database config underneath replication - _, err = rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) - require.NoError(t, err) - - // just change the sync function to cause the database to reload - dbConfig := *rt.ServerContext().GetDbConfig("db") - dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) - resp := rt.ReplaceDbConfig("db", dbConfig) - RequireStatus(t, resp, http.StatusCreated) - - // Did we tell the client to close the connection (via HTTP/503)? - // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. 
- WaitAndAssertCondition(t, func() bool { - lastErr, ok := lastPushRevErr.Load().(error) - if !ok { - return false - } - if lastErr == nil { - return false - } - lastErrMsg := lastErr.Error() - if !strings.Contains(lastErrMsg, "HTTP 503") { - return false - } - if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { - return false - } - return true - }, "expected HTTP 503 error") + btcRunner := NewBlipTesterClientRunner(t) + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, &rtConfig) + defer rt.Close() + + opts := &BlipTesterClientOpts{SupportedBLIPProtocols: SupportedBLIPProtocols} + client := btcRunner.NewBlipTesterClientOptsWithRT(rt, opts) + defer client.Close() + + var lastPushRevErr atomic.Value + + // Wait for the background updates to finish at the end of the test + shouldCreateDocs := base.NewAtomicBool(true) + wg := sync.WaitGroup{} + wg.Add(1) + defer func() { + shouldCreateDocs.Set(false) + wg.Wait() + }() + + // Start the test client creating and pushing documents in the background + go func() { + for i := 0; shouldCreateDocs.IsTrue(); i++ { + // this will begin to error when the database is reloaded underneath the replication + _, err := btcRunner.PushRev(client.id, fmt.Sprintf("doc%d", i), EmptyDocVersion(), []byte(fmt.Sprintf(`{"i":%d}`, i))) + if err != nil { + lastPushRevErr.Store(err) + } + } + _ = rt.WaitForPendingChanges() + wg.Done() + }() + + // and wait for a few to be done before we proceed with updating database config underneath replication + _, err := rt.WaitForChanges(5, "/{{.keyspace}}/_changes", "", true) + require.NoError(t, err) + + // just change the sync function to cause the database to reload + dbConfig := *rt.ServerContext().GetDbConfig("db") + dbConfig.Sync = base.StringPtr(`function(doc){console.log("update");}`) + resp := rt.ReplaceDbConfig("db", dbConfig) + RequireStatus(t, resp, http.StatusCreated) + + // Did we tell the client to close 
the connection (via HTTP/503)? + // The BlipTesterClient doesn't implement reconnect - but CBL resets the replication connection. + WaitAndAssertCondition(t, func() bool { + lastErr, ok := lastPushRevErr.Load().(error) + if !ok { + return false + } + if lastErr == nil { + return false + } + lastErrMsg := lastErr.Error() + if !strings.Contains(lastErrMsg, "HTTP 503") { + return false + } + if !strings.Contains(lastErrMsg, "Sync Gateway database went away - asking client to reconnect") { + return false + } + return true + }, "expected HTTP 503 error") + }) } diff --git a/rest/blip_client_test.go b/rest/blip_client_test.go index f3e808aae0..c663a14f6a 100644 --- a/rest/blip_client_test.go +++ b/rest/blip_client_test.go @@ -46,6 +46,7 @@ type BlipTesterClientOpts struct { type BlipTesterClient struct { BlipTesterClientOpts + id uint32 // unique ID for the client rt *RestTester pullReplication *BlipTesterReplicator // SG -> CBL replications pushReplication *BlipTesterReplicator // CBL -> SG replications @@ -69,6 +70,14 @@ type BlipTesterCollectionClient struct { lastReplicatedRevLock sync.RWMutex // lock for lastReplicatedRev map } +// BlipTestClientRunner is for running the blip tester client and its associated methods in test framework +type BlipTestClientRunner struct { + clients map[uint32]*BlipTesterClient // map of created BlipTesterClient's + t *testing.T + initialisedInsideRunnerCode bool // flag to check that the BlipTesterClient is being initialised in the correct area (inside the Run() method) + SkipVersionVectorInitialization bool // used to skip the version vector subtest +} + type BodyMessagePair struct { body []byte message *blip.Message @@ -85,6 +94,14 @@ type BlipTesterReplicator struct { replicationStats *db.BlipSyncStats // Stats of replications } +// NewBlipTesterClientRunner creates a BlipTestClientRunner type +func NewBlipTesterClientRunner(t *testing.T) *BlipTestClientRunner { + return &BlipTestClientRunner{ + t: t, + clients: 
make(map[uint32]*BlipTesterClient), + } +} + func (btr *BlipTesterReplicator) Close() { btr.bt.Close() btr.messagesLock.Lock() @@ -571,33 +588,80 @@ func getCollectionsForBLIP(_ testing.TB, rt *RestTester) []string { return collections } -func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { +func (btcRunner *BlipTestClientRunner) NewBlipTesterClientOptsWithRT(rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient) { + if !btcRunner.initialisedInsideRunnerCode { + btcRunner.t.Fatalf("must initialise BlipTesterClient inside Run() method") + } if opts == nil { opts = &BlipTesterClientOpts{} } - btc := BlipTesterClient{ + id, err := uuid.NewRandom() + require.NoError(btcRunner.t, err) + + client = &BlipTesterClient{ BlipTesterClientOpts: *opts, rt: rt, + id: id.ID(), + } + btcRunner.clients[client.id] = client + err = client.createBlipTesterReplications() + require.NoError(btcRunner.t, err) + + return client +} + +func (btc *BlipTesterClient) Close() { + btc.tearDownBlipClientReplications() + for _, collectionClient := range btc.collectionClients { + collectionClient.Close() } + if btc.nonCollectionAwareClient != nil { + btc.nonCollectionAwareClient.Close() + } +} + +func (btcRunner *BlipTestClientRunner) Run(test func(t *testing.T, SupportedBLIPProtocols []string)) { + btcRunner.initialisedInsideRunnerCode = true + // reset to protect against someone creating a new client after Run() is run + defer func() { btcRunner.initialisedInsideRunnerCode = false }() + btcRunner.t.Run("revTree", func(t *testing.T) { + test(t, []string{db.BlipCBMobileReplicationV3}) + }) + // if test is not wanting version vector subprotocol to be run, return before we start this subtest + if btcRunner.SkipVersionVectorInitialization { + return + } + btcRunner.t.Run("versionVector", func(t *testing.T) { + t.Skip("skip VV subtest on master") + // bump sub protocol version here and pass into test function 
pending CBG-3253 + test(t, nil) + }) +} +func (btc *BlipTesterClient) tearDownBlipClientReplications() { + btc.pullReplication.Close() + btc.pushReplication.Close() +} + +func (btc *BlipTesterClient) createBlipTesterReplications() error { id, err := uuid.NewRandom() if err != nil { - return nil, err + return err } - if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pushReplication, err = newBlipTesterReplication(btc.rt.TB, "push"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), &btc, opts.SkipCollectionsInitialization); err != nil { - return nil, err + if btc.pullReplication, err = newBlipTesterReplication(btc.rt.TB, "pull"+id.String(), btc, btc.BlipTesterClientOpts.SkipCollectionsInitialization); err != nil { + return err } - collections := getCollectionsForBLIP(tb, rt) - if !opts.SkipCollectionsInitialization && len(collections) > 0 { + collections := getCollectionsForBLIP(btc.rt.TB, btc.rt) + if !btc.BlipTesterClientOpts.SkipCollectionsInitialization && len(collections) > 0 { btc.collectionClients = make([]*BlipTesterCollectionClient, len(collections)) for i, collection := range collections { if err := btc.initCollectionReplication(collection, i); err != nil { - return nil, err + return err } } } else { @@ -605,40 +669,14 @@ func createBlipTesterClientOpts(tb testing.TB, rt *RestTester, opts *BlipTesterC docs: make(map[string]map[string]*BodyMessagePair), attachments: make(map[string][]byte), lastReplicatedRev: make(map[string]string), - parent: &btc, + parent: btc, } - } - return &btc, nil -} - -// NewBlipTesterClient returns a client which emulates the behaviour of a CBL client over BLIP. 
-func NewBlipTesterClient(tb testing.TB, rt *RestTester) (client *BlipTesterClient, err error) { - return createBlipTesterClientOpts(tb, rt, nil) -} - -func NewBlipTesterClientOptsWithRT(tb testing.TB, rt *RestTester, opts *BlipTesterClientOpts) (client *BlipTesterClient, err error) { - client, err = createBlipTesterClientOpts(tb, rt, opts) - if err != nil { - return nil, err - } - - client.pullReplication.bt.avoidRestTesterClose = true - client.pushReplication.bt.avoidRestTesterClose = true - - return client, nil -} + btc.pullReplication.bt.avoidRestTesterClose = true + btc.pushReplication.bt.avoidRestTesterClose = true -func (btc *BlipTesterClient) Close() { - btc.pullReplication.Close() - btc.pushReplication.Close() - for _, collectionClient := range btc.collectionClients { - collectionClient.Close() - } - if btc.nonCollectionAwareClient != nil { - btc.nonCollectionAwareClient.Close() - } + return nil } func (btc *BlipTesterClient) initCollectionReplication(collection string, collectionIdx int) error { @@ -668,25 +706,25 @@ func (btc *BlipTesterClient) waitForReplicationMessage(collection *db.DatabaseCo } // SingleCollection returns a single collection blip tester if the RestTester database is configured with only one collection. Otherwise, throw a fatal test error. 
-func (btc *BlipTesterClient) SingleCollection() *BlipTesterCollectionClient { - if btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) SingleCollection(clientID uint32) *BlipTesterCollectionClient { + if btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - require.Equal(btc.rt.TB, 1, len(btc.collectionClients)) - return btc.collectionClients[0] + require.Equal(btcRunner.clients[clientID].rt.TB, 1, len(btcRunner.clients[clientID].collectionClients)) + return btcRunner.clients[clientID].collectionClients[0] } // Collection return a collection blip tester by name, if configured in the RestTester database. Otherwise, throw a fatal test error. -func (btc *BlipTesterClient) Collection(collectionName string) *BlipTesterCollectionClient { - if collectionName == "_default._default" && btc.nonCollectionAwareClient != nil { - return btc.nonCollectionAwareClient +func (btcRunner *BlipTestClientRunner) Collection(clientID uint32, collectionName string) *BlipTesterCollectionClient { + if collectionName == "_default._default" && btcRunner.clients[clientID].nonCollectionAwareClient != nil { + return btcRunner.clients[clientID].nonCollectionAwareClient } - for _, collectionClient := range btc.collectionClients { + for _, collectionClient := range btcRunner.clients[clientID].collectionClients { if collectionClient.collection == collectionName { return collectionClient } } - btc.rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) + btcRunner.clients[clientID].rt.TB.Fatalf("Could not find collection %s in BlipTesterClient", collectionName) return nil } @@ -1126,81 +1164,81 @@ func (btc *BlipTesterCollectionClient) GetBlipRevMessage(docID, revID string) (m return nil, false } -func (btc *BlipTesterClient) StartPull() error { - return btc.SingleCollection().StartPull() +func (btcRunner *BlipTestClientRunner) 
StartPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartPull() } // WaitForVersion blocks until the given document version has been stored by the client, and returns the data when found. -func (btc *BlipTesterClient) WaitForVersion(docID string, docVersion DocVersion) (data []byte, found bool) { - return btc.SingleCollection().WaitForVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForVersion(clientID uint32, docID string, docVersion DocVersion) (data []byte, found bool) { + return btcRunner.SingleCollection(clientID).WaitForVersion(docID, docVersion) } -func (btc *BlipTesterClient) WaitForDoc(docID string) ([]byte, bool) { - return btc.SingleCollection().WaitForDoc(docID) +func (btcRunner *BlipTestClientRunner) WaitForDoc(clientID uint32, docID string) ([]byte, bool) { + return btcRunner.SingleCollection(clientID).WaitForDoc(docID) } -func (btc *BlipTesterClient) WaitForBlipRevMessage(docID string, docVersion DocVersion) (*blip.Message, bool) { - return btc.SingleCollection().WaitForBlipRevMessage(docID, docVersion) +func (btcRunner *BlipTestClientRunner) WaitForBlipRevMessage(clientID uint32, docID string, docVersion DocVersion) (*blip.Message, bool) { + return btcRunner.SingleCollection(clientID).WaitForBlipRevMessage(docID, docVersion) } -func (btc *BlipTesterClient) StartOneshotPull() error { - return btc.SingleCollection().StartOneshotPull() +func (btcRunner *BlipTestClientRunner) StartOneshotPull(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPull() } -func (btc *BlipTesterClient) StartOneshotPullFiltered(channels string) error { - return btc.SingleCollection().StartOneshotPullFiltered(channels) +func (btcRunner *BlipTestClientRunner) StartOneshotPullFiltered(clientID uint32, channels string) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullFiltered(channels) } -func (btc *BlipTesterClient) StartOneshotPullRequestPlus() error { - return 
btc.SingleCollection().StartOneshotPullRequestPlus() +func (btcRunner *BlipTestClientRunner) StartOneshotPullRequestPlus(clientID uint32) error { + return btcRunner.SingleCollection(clientID).StartOneshotPullRequestPlus() } -func (btc *BlipTesterClient) PushRev(docID string, version DocVersion, body []byte) (DocVersion, error) { - return btc.SingleCollection().PushRev(docID, version, body) +func (btcRunner *BlipTestClientRunner) PushRev(clientID uint32, docID string, version DocVersion, body []byte) (DocVersion, error) { + return btcRunner.SingleCollection(clientID).PushRev(docID, version, body) } -func (btc *BlipTesterClient) StartPullSince(continuous, since, activeOnly string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, "", "") +func (btcRunner *BlipTestClientRunner) StartPullSince(clientID uint32, continuous, since, activeOnly string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, "", "") } -func (btc *BlipTesterClient) StartFilteredPullSince(continuous, since, activeOnly string, channels string) error { - return btc.SingleCollection().StartPullSince(continuous, since, activeOnly, channels, "") +func (btcRunner *BlipTestClientRunner) StartFilteredPullSince(clientID uint32, continuous, since, activeOnly, channels string) error { + return btcRunner.SingleCollection(clientID).StartPullSince(continuous, since, activeOnly, channels, "") } -func (btc *BlipTesterClient) GetVersion(docID string, docVersion DocVersion) ([]byte, bool) { - return btc.SingleCollection().GetVersion(docID, docVersion) +func (btcRunner *BlipTestClientRunner) GetVersion(clientID uint32, docID string, docVersion DocVersion) ([]byte, bool) { + return btcRunner.SingleCollection(clientID).GetVersion(docID, docVersion) } -func (btc *BlipTesterClient) saveAttachment(contentType string, attachmentData string) (int, string, error) { - return btc.SingleCollection().saveAttachment(contentType, attachmentData) 
+func (btcRunner *BlipTestClientRunner) saveAttachment(clientID uint32, contentType string, attachmentData string) (int, string, error) { + return btcRunner.SingleCollection(clientID).saveAttachment(contentType, attachmentData) } -func (btc *BlipTesterClient) StoreRevOnClient(docID, revID string, body []byte) error { - return btc.SingleCollection().StoreRevOnClient(docID, revID, body) +func (btcRunner *BlipTestClientRunner) StoreRevOnClient(clientID uint32, docID, revID string, body []byte) error { + return btcRunner.SingleCollection(clientID).StoreRevOnClient(docID, revID, body) } -func (btc *BlipTesterClient) PushRevWithHistory(docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { - return btc.SingleCollection().PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) +func (btcRunner *BlipTestClientRunner) PushRevWithHistory(clientID uint32, docID, revID string, body []byte, revCount, prunedRevCount int) (string, error) { + return btcRunner.SingleCollection(clientID).PushRevWithHistory(docID, revID, body, revCount, prunedRevCount) } -func (btc *BlipTesterClient) AttachmentsLock() *sync.RWMutex { - return &btc.SingleCollection().attachmentsLock +func (btcRunner *BlipTestClientRunner) AttachmentsLock(clientID uint32) *sync.RWMutex { + return &btcRunner.SingleCollection(clientID).attachmentsLock } func (btc *BlipTesterCollectionClient) AttachmentsLock() *sync.RWMutex { return &btc.attachmentsLock } -func (btc *BlipTesterClient) Attachments() map[string][]byte { - return btc.SingleCollection().attachments +func (btcRunner *BlipTestClientRunner) Attachments(clientID uint32) map[string][]byte { + return btcRunner.SingleCollection(clientID).attachments } func (btc *BlipTesterCollectionClient) Attachments() map[string][]byte { return btc.attachments } -func (btc *BlipTesterClient) UnsubPullChanges() ([]byte, error) { - return btc.SingleCollection().UnsubPullChanges() +func (btcRunner *BlipTestClientRunner) UnsubPullChanges(clientID 
uint32) ([]byte, error) { + return btcRunner.SingleCollection(clientID).UnsubPullChanges() } func (btc *BlipTesterCollectionClient) addCollectionProperty(msg *blip.Message) { diff --git a/rest/revocation_test.go b/rest/revocation_test.go index 35359e5f1d..06897b002f 100644 --- a/rest/revocation_test.go +++ b/rest/revocation_test.go @@ -2223,297 +2223,313 @@ func TestReplicatorRevocationsFromZero(t *testing.T) { func TestRevocationMessage(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - revocationTester, rt := InitScenario(t, nil) - defer rt.Close() - - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() - - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() + + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - version := rt.PutDoc("doc", `{"channels": "A"}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc("doc", `{"channels": "A"}`) - // Start pull - err = btc.StartOneshotPull() - assert.NoError(t, err) - - // Wait for doc revision to come over - _, ok := btc.WaitForBlipRevMessage("doc", version) - require.True(t, ok) 
+ require.NoError(t, rt.WaitForPendingChanges()) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Start pull + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - const doc1ID = "doc1" - version = rt.PutDoc(doc1ID, `{"channels": "!"}`) + // Wait for doc revision to come over + _, ok := btcRunner.WaitForBlipRevMessage(btc.id, "doc", version) + require.True(t, ok) - revocationTester.fillToSeq(10) - version = rt.UpdateDoc(doc1ID, version, "{}") + // Remove role from user + revocationTester.removeRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) + const doc1ID = "doc1" + version = rt.PutDoc(doc1ID, `{"channels": "!"}`) - // Start a pull since 5 to receive revocation and removal - err = btc.StartPullSince("false", "5", "false") - assert.NoError(t, err) + revocationTester.fillToSeq(10) + version = rt.UpdateDoc(doc1ID, version, "{}") - // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here - _, found := btc.WaitForVersion(doc1ID, version) - require.True(t, found) - - messages := btc.pullReplication.GetMessages() - - testCases := []struct { - Name string - DocID string - ExpectedDeleted int64 - }{ - { - Name: "Revocation", - DocID: "doc", - ExpectedDeleted: int64(2), - }, - { - Name: "Removed", - DocID: "doc1", - ExpectedDeleted: int64(4), - }, - } + require.NoError(t, rt.WaitForPendingChanges()) - for _, testCase := range testCases { - t.Run(testCase.Name, func(t *testing.T) { - // Verify the deleted property in the changes message is "2" this indicated a revocation - for _, msg := range messages { - if msg.Properties[db.BlipProfile] == db.MessageChanges { - var changesMessages [][]interface{} - err = msg.ReadJSONBody(&changesMessages) - if err != nil { - continue - } + // Start a pull since 5 to receive revocation and removal + err = btcRunner.StartPullSince(btc.id, "false", "5", "false") + assert.NoError(t, err) - if len(changesMessages) != 2 || 
len(changesMessages[0]) != 4 { - continue - } + // Wait for doc1 rev2 - This is the last rev we expect so we can be sure replication is complete here + _, found := btcRunner.WaitForVersion(btc.id, doc1ID, version) + require.True(t, found) + + messages := btc.pullReplication.GetMessages() + + testCases := []struct { + Name string + DocID string + ExpectedDeleted int64 + }{ + { + Name: "Revocation", + DocID: "doc", + ExpectedDeleted: int64(2), + }, + { + Name: "Removed", + DocID: "doc1", + ExpectedDeleted: int64(4), + }, + } - criteriaMet := false - for _, changesMessage := range changesMessages { - castedNum, ok := changesMessage[3].(json.Number) - if !ok { + for _, testCase := range testCases { + t.Run(testCase.Name, func(t *testing.T) { + // Verify the deleted property in the changes message is "2" this indicated a revocation + for _, msg := range messages { + if msg.Properties[db.BlipProfile] == db.MessageChanges { + var changesMessages [][]interface{} + err = msg.ReadJSONBody(&changesMessages) + if err != nil { continue } - intDeleted, err := castedNum.Int64() - if err != nil { + + if len(changesMessages) != 2 || len(changesMessages[0]) != 4 { continue } - if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { - criteriaMet = true - break + + criteriaMet := false + for _, changesMessage := range changesMessages { + castedNum, ok := changesMessage[3].(json.Number) + if !ok { + continue + } + intDeleted, err := castedNum.Int64() + if err != nil { + continue + } + if docName, ok := changesMessage[1].(string); ok && docName == testCase.DocID && intDeleted == testCase.ExpectedDeleted { + criteriaMet = true + break + } } - } - assert.True(t, criteriaMet) + assert.True(t, criteriaMet) + } } - } - }) - } + }) + } - assert.NoError(t, err) + assert.NoError(t, err) + }) } func TestRevocationNoRev(t *testing.T) { defer db.SuspendSequenceBatching()() - revocationTester, rt := InitScenario(t, nil) - defer 
rt.Close() + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc" + const waitMarkerID = "docmarker" - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + revocationTester, rt := InitScenario(t, nil) + defer rt.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}`) + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc(docID, `{"channels": "A"}`) - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) - // Remove role from user - revocationTester.removeRole("user", "foo") + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + require.True(t, ok) - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + // Remove role from user + 
revocationTester.removeRole("user", "foo") - const waitMarkerID = "docmarker" - waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - messages := btc.pullReplication.GetMessages() + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) - var highestMsgSeq uint32 - var highestSeqMsg blip.Message - // Grab most recent changes message - for _, message := range messages { - messageBody, err := message.Body() - require.NoError(t, err) - if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { - if highestMsgSeq < uint32(message.SerialNumber()) { - highestMsgSeq = uint32(message.SerialNumber()) - highestSeqMsg = message + messages := btc.pullReplication.GetMessages() + + var highestMsgSeq uint32 + var highestSeqMsg blip.Message + // Grab most recent changes message + for _, message := range messages { + messageBody, err := message.Body() + require.NoError(t, err) + if message.Properties["Profile"] == db.MessageChanges && string(messageBody) != "null" { + if highestMsgSeq < uint32(message.SerialNumber()) { + highestMsgSeq = uint32(message.SerialNumber()) + highestSeqMsg = message + } } } - } - var messageBody []interface{} - err = highestSeqMsg.ReadJSONBody(&messageBody) - require.NoError(t, err) - require.Len(t, messageBody, 2) - require.Len(t, messageBody[0], 4) + var 
messageBody []interface{} + err = highestSeqMsg.ReadJSONBody(&messageBody) + require.NoError(t, err) + require.Len(t, messageBody, 2) + require.Len(t, messageBody[0], 4) - deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() - require.NoError(t, err) + deletedFlag, err := messageBody[0].([]interface{})[3].(json.Number).Int64() + require.NoError(t, err) - assert.Equal(t, deletedFlag, int64(2)) + assert.Equal(t, deletedFlag, int64(2)) + }) } func TestRevocationGetSyncDataError(t *testing.T) { defer db.SuspendSequenceBatching()() var throw bool base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - // Two callbacks to cover usage with CBS/Xattrs and without - revocationTester, rt := InitScenario( - t, &RestTesterConfig{ - leakyBucketConfig: &base.LeakyBucketConfig{ - GetWithXattrCallback: func(key string) error { - return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") - }, GetRawCallback: func(key string) error { - if throw { - return fmt.Errorf("Leaky Bucket GetRawCallback Error") - } - return nil + btcRunner := NewBlipTesterClientRunner(t) + const docID = "doc" + const waitMarkerID = "docmarker" + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + // Two callbacks to cover usage with CBS/Xattrs and without + revocationTester, rt := InitScenario( + t, &RestTesterConfig{ + leakyBucketConfig: &base.LeakyBucketConfig{ + GetWithXattrCallback: func(key string) error { + return fmt.Errorf("Leaky Bucket GetWithXattrCallback Error") + }, GetRawCallback: func(key string) error { + if throw { + return fmt.Errorf("Leaky Bucket GetRawCallback Error") + } + return nil + }, }, }, - }, - ) + ) - defer rt.Close() + defer rt.Close() - btc, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "user", - Channels: []string{"*"}, - ClientDeltas: false, - SendRevocations: true, - }) - assert.NoError(t, err) - defer btc.Close() + btc := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + 
Username: "user", + Channels: []string{"*"}, + ClientDeltas: false, + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer btc.Close() - // Add channel to role and role to user - revocationTester.addRoleChannel("foo", "A") - revocationTester.addRole("user", "foo") + // Add channel to role and role to user + revocationTester.addRoleChannel("foo", "A") + revocationTester.addRole("user", "foo") - // Skip to seq 4 and then create doc in channel A - revocationTester.fillToSeq(4) - const docID = "doc" - version := rt.PutDoc(docID, `{"channels": "A"}}`) + // Skip to seq 4 and then create doc in channel A + revocationTester.fillToSeq(4) + version := rt.PutDoc(docID, `{"channels": "A"}}`) - require.NoError(t, rt.WaitForPendingChanges()) - firstOneShotSinceSeq := rt.GetDocumentSequence("doc") + require.NoError(t, rt.WaitForPendingChanges()) + firstOneShotSinceSeq := rt.GetDocumentSequence("doc") - // OneShot pull to grab doc - err = btc.StartOneshotPull() - assert.NoError(t, err) - throw = true - _, ok := btc.WaitForVersion(docID, version) - require.True(t, ok) + // OneShot pull to grab doc + err := btcRunner.StartOneshotPull(btc.id) + assert.NoError(t, err) + throw = true + _, ok := btcRunner.WaitForVersion(btc.id, docID, version) + require.True(t, ok) - // Remove role from user - revocationTester.removeRole("user", "foo") + // Remove role from user + revocationTester.removeRole("user", "foo") - _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) + _ = rt.UpdateDoc(docID, version, `{"channels": "A", "val": "mutate"}`) - const waitMarkerID = "docmarker" - waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) - require.NoError(t, rt.WaitForPendingChanges()) + waitMarkerVersion := rt.PutDoc(waitMarkerID, `{"channels": "!"}`) + require.NoError(t, rt.WaitForPendingChanges()) - lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) - err = btc.StartPullSince("false", lastSeqStr, "false") - assert.NoError(t, err) 
+ lastSeqStr := strconv.FormatUint(firstOneShotSinceSeq, 10) + err = btcRunner.StartPullSince(btc.id, "false", lastSeqStr, "false") + assert.NoError(t, err) - _, ok = btc.WaitForVersion(waitMarkerID, waitMarkerVersion) - require.True(t, ok) + _, ok = btcRunner.WaitForVersion(btc.id, waitMarkerID, waitMarkerVersion) + require.True(t, ok) + }) } // Regression test for CBG-2183. func TestBlipRevokeNonExistentRole(t *testing.T) { - rt := NewRestTester(t, - &RestTesterConfig{ - GuestEnabled: false, - }) - defer rt.Close() - collection := rt.GetSingleTestDatabaseCollection() - base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) - res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) - RequireStatus(t, res, http.StatusCreated) - - // Create a doc so we have something to replicate - res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) - RequireStatus(t, res, http.StatusCreated) + btcRunner := NewBlipTesterClientRunner(t) + + btcRunner.Run(func(t *testing.T, SupportedBLIPProtocols []string) { + rt := NewRestTester(t, + &RestTesterConfig{ + GuestEnabled: false, + }) + defer rt.Close() + collection := rt.GetSingleTestDatabaseCollection() + + // 1. Create user with admin_roles including two roles not previously defined (a1 and a2, for example) + res := rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{"c1"}, []string{"a1", "a2"})) + RequireStatus(t, res, http.StatusCreated) + + // Create a doc so we have something to replicate + res = rt.SendAdminRequest(http.MethodPut, "/{{.keyspace}}/testdoc", `{"channels": ["c1"]}`) + RequireStatus(t, res, http.StatusCreated) + + // 3. 
Update the user to not reference one of the roles (update to ['a1'], for example) + // [also revoke channel c1 so the doc shows up in the revocation queries] + res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) + RequireStatus(t, res, http.StatusOK) + + // 4. Try to sync + bt := btcRunner.NewBlipTesterClientOptsWithRT(rt, &BlipTesterClientOpts{ + Username: "bilbo", + SendRevocations: true, + SupportedBLIPProtocols: SupportedBLIPProtocols, + }) + defer bt.Close() - // 3. Update the user to not reference one of the roles (update to ['a1'], for example) - // [also revoke channel c1 so the doc shows up in the revocation queries] - res = rt.SendAdminRequest(http.MethodPut, fmt.Sprintf("/%s/_user/bilbo", rt.GetDatabase().Name), GetUserPayload(t, "bilbo", "test", "", collection, []string{}, []string{"a1"})) - RequireStatus(t, res, http.StatusOK) + require.NoError(t, btcRunner.StartPull(bt.id)) - // 4. 
Try to sync - bt, err := NewBlipTesterClientOptsWithRT(t, rt, &BlipTesterClientOpts{ - Username: "bilbo", - SendRevocations: true, + // in the failing case we'll panic before hitting this + base.RequireWaitForStat(t, func() int64 { + return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() + }, 1) }) - require.NoError(t, err) - defer bt.Close() - - require.NoError(t, bt.StartPull()) - - // in the failing case we'll panic before hitting this - base.RequireWaitForStat(t, func() int64 { - return rt.GetDatabase().DbStats.CBLReplicationPull().NumPullReplCaughtUp.Value() - }, 1) } func TestReplicatorSwitchPurgeNoReset(t *testing.T) { From d75a09b6ed1f952b4196f01c218705ad07a02bbe Mon Sep 17 00:00:00 2001 From: Ben Brooks Date: Wed, 15 Nov 2023 14:40:37 +0000 Subject: [PATCH 11/14] CBG-3589: Add 4.0.0 build (#6577) --- manifest/4.0.xml | 30 ++++++++++++++++++++++++++++++ manifest/product-config.json | 10 +++++++++- 2 files changed, 39 insertions(+), 1 deletion(-) create mode 100644 manifest/4.0.xml diff --git a/manifest/4.0.xml b/manifest/4.0.xml new file mode 100644 index 0000000000..78ecd3d33f --- /dev/null +++ b/manifest/4.0.xml @@ -0,0 +1,30 @@ + + + + + + + + + + + + + + + + + + + + + + diff --git a/manifest/product-config.json b/manifest/product-config.json index 22097f880f..08dc2f1819 100644 --- a/manifest/product-config.json +++ b/manifest/product-config.json @@ -487,7 +487,15 @@ "trigger_blackduck": true, "start_build": 586 }, - + "manifest/4.0.xml": { + "release": "4.0.0", + "release_name": "Couchbase Sync Gateway 4.0.0", + "production": true, + "interval": 30, + "go_version": "1.21.4", + "trigger_blackduck": true, + "start_build": 1 + }, "manifest/dev.xml": { "release": "dev", "release_name": "Couchbase Sync Gateway Dev", From 5b97466aef736daedccfbc160ca4c69b8e332b1d Mon Sep 17 00:00:00 2001 From: Ben Brooks Date: Thu, 16 Nov 2023 16:24:35 +0000 Subject: [PATCH 12/14] Ignore planPIndexes directory from cbgt (MB-59477) (#6580) --- 
.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitignore b/.gitignore index 41723989c2..1d36337c0c 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,5 @@ __pycache__ ### Couchbase Plugin ### .cbcache/ + +planPIndexes/ From ea7eae73b579fcbca3ef6c0c22d7625b6da2a6e5 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Thu, 16 Nov 2023 11:56:25 -0500 Subject: [PATCH 13/14] Correctly put timeout in config, fixes #6576 (#6581) --- .golangci-strict.yml | 3 ++- .golangci.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.golangci-strict.yml b/.golangci-strict.yml index 18e485e41a..2a28db4557 100644 --- a/.golangci-strict.yml +++ b/.golangci-strict.yml @@ -8,7 +8,8 @@ # config file for golangci-lint -timeout: 3m +run: + timeout: 3m linters: enable: diff --git a/.golangci.yml b/.golangci.yml index 2ae7f5c367..f02eb91185 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -8,7 +8,8 @@ # config file for golangci-lint -timeout: 3m +run: + timeout: 3m linters: enable: From e287e05101f1168ac3af2fdb63fdc0ffb038ed95 Mon Sep 17 00:00:00 2001 From: Tor Colvin Date: Fri, 17 Nov 2023 02:42:43 -0500 Subject: [PATCH 14/14] CBG-3271 remove persistent rosmar buckets (#6570) * CBG-3271 remove persistent rosmar buckets - rosmar now supports persisting in memory buckets, so keep the buckets in memory until the buckets are closed. This eliminates NoCloseClone in many cases. Another PR might be able to remove even more of these. - this is prep for being able to run a bootstrap connection. 
--- base/leaky_bucket.go | 4 +-- base/main_test_bucket_pool.go | 28 ++++-------------- base/util_testing.go | 29 ------------------- go.mod | 4 +-- go.sum | 19 +++++++++--- rest/adminapitest/admin_api_test.go | 22 +++++++------- .../collections_admin_api_test.go | 1 - rest/api.go | 2 +- rest/api_collections_test.go | 28 +++++++++--------- rest/importtest/collections_import_test.go | 6 ++-- rest/importtest/import_test.go | 2 +- rest/server_context_test.go | 4 --- rest/sync_fn_test.go | 13 +-------- rest/upgradetest/remove_collection_test.go | 2 +- rest/upgradetest/upgrade_registry_test.go | 4 +-- rest/user_api_test.go | 4 +-- rest/utilities_testing.go | 6 +--- rest/utilities_testing_test.go | 2 +- 18 files changed, 63 insertions(+), 117 deletions(-) diff --git a/base/leaky_bucket.go b/base/leaky_bucket.go index 20ef31791a..988f86be3b 100644 --- a/base/leaky_bucket.go +++ b/base/leaky_bucket.go @@ -58,9 +58,9 @@ func (b *LeakyBucket) SetIgnoreClose(value bool) { b.config.IgnoreClose = value } -func (b *LeakyBucket) CloseAndDelete() error { +func (b *LeakyBucket) CloseAndDelete(ctx context.Context) error { if bucket, ok := b.bucket.(sgbucket.DeleteableStore); ok { - return bucket.CloseAndDelete() + return bucket.CloseAndDelete(ctx) } return nil } diff --git a/base/main_test_bucket_pool.go b/base/main_test_bucket_pool.go index 4789bc5f52..8c2d80d37d 100644 --- a/base/main_test_bucket_pool.go +++ b/base/main_test_bucket_pool.go @@ -198,29 +198,17 @@ func (tbp *TestBucketPool) GetWalrusTestBucket(t testing.TB, url string) (b Buck require.NoError(t, err) var walrusBucket *rosmar.Bucket - var typeName string + const typeName = "rosmar" bucketName := tbpBucketNamePrefix + "rosmar_" + id if url == "walrus:" || url == rosmar.InMemoryURL { - walrusBucket, err = rosmar.OpenBucket(url, rosmar.CreateOrOpen) - if err == nil { - err := walrusBucket.SetName(bucketName) - if err != nil { - tbp.Fatalf(testCtx, "Could not set name %s for rosmar bucket: %s", bucketName, err) - } - } + 
walrusBucket, err = rosmar.OpenBucket(url, bucketName, rosmar.CreateOrOpen) } else { walrusBucket, err = rosmar.OpenBucketIn(url, bucketName, rosmar.CreateOrOpen) } - typeName = "rosmar" if err != nil { tbp.Fatalf(testCtx, "couldn't get %s bucket from <%s>: %v", typeName, url, err) } - err = walrusBucket.SetName(bucketName) - if err != nil { - tbp.Fatalf(testCtx, "Could not set name %s for rosmar bucket: %s", bucketName, err) - } - // Wrap Walrus buckets with a leaky bucket to support vbucket IDs on feed. b = &LeakyBucket{bucket: walrusBucket, config: &LeakyBucketConfig{TapFeedVbuckets: true}} @@ -258,14 +246,10 @@ func (tbp *TestBucketPool) GetWalrusTestBucket(t testing.TB, url string) (b Buck atomic.AddInt32(&tbp.stats.NumBucketsClosed, 1) atomic.AddInt64(&tbp.stats.TotalInuseBucketNano, time.Since(openedStart).Nanoseconds()) tbp.markBucketClosed(t, b) - if url == kTestWalrusURL { - b.Close(ctx) - } else { - // Persisted buckets should call close and delete - closeErr := walrusBucket.CloseAndDelete() - if closeErr != nil { - tbp.Logf(ctx, "Unexpected error closing persistent %s bucket: %v", typeName, closeErr) - } + // Persisted buckets should call close and delete + closeErr := walrusBucket.CloseAndDelete(ctx) + if closeErr != nil { + tbp.Logf(ctx, "Unexpected error closing persistent %s bucket: %v", typeName, closeErr) } } diff --git a/base/util_testing.go b/base/util_testing.go index 5bfa416f93..5b76c94df1 100644 --- a/base/util_testing.go +++ b/base/util_testing.go @@ -102,12 +102,6 @@ func GetTestBucket(t testing.TB) *TestBucket { return getTestBucket(t, false) } -// GetTestBucket returns a test bucket from a pool. If running with walrus buckets, will persist bucket data -// across bucket close. -func GetPersistentTestBucket(t testing.TB) *TestBucket { - return getTestBucket(t, true) -} - // getTestBucket returns a bucket from the bucket pool. 
Persistent flag determines behaviour for walrus // buckets only - Couchbase bucket behaviour is defined by the bucket pool readier/init. func getTestBucket(t testing.TB, persistent bool) *TestBucket { @@ -204,29 +198,6 @@ func rosmarUriFromPath(path string) string { return uri + strings.ReplaceAll(path, `\`, `/`) } -// Gets a Walrus bucket which will be persisted to a temporary directory -// Returns both the test bucket which is persisted and a function which can be used to remove the created temporary -// directory once the test has finished with it. -func GetPersistentWalrusBucket(t testing.TB) (*TestBucket, func()) { - tempDir, err := os.MkdirTemp("", "walrustemp") - require.NoError(t, err) - - bucket, spec, closeFn := GTestBucketPool.GetWalrusTestBucket(t, rosmarUriFromPath(tempDir)) - - // Return this separate to closeFn as we want to avoid this being removed on database close (/_offline handling) - removeFileFunc := func() { - err := os.RemoveAll(tempDir) - require.NoError(t, err) - } - - return &TestBucket{ - Bucket: bucket, - BucketSpec: spec, - closeFn: closeFn, - t: t, - }, removeFileFunc -} - // Should Sync Gateway use XATTRS functionality when running unit tests? 
func TestUseXattrs() bool { useXattrs, isSet := os.LookupEnv(TestEnvSyncGatewayUseXattrs) diff --git a/go.mod b/go.mod index 3ec72ae0b3..4272f5499f 100644 --- a/go.mod +++ b/go.mod @@ -13,10 +13,10 @@ require ( github.com/couchbase/gocbcore/v10 v10.2.8 github.com/couchbase/gomemcached v0.2.1 github.com/couchbase/goutils v0.1.2 - github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148 + github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483 github.com/couchbaselabs/go-fleecedelta v0.0.0-20220909152808-6d09efa7a338 github.com/couchbaselabs/gocbconnstr v1.0.5 - github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6 + github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e github.com/elastic/gosigar v0.14.2 github.com/felixge/fgprof v0.9.3 github.com/google/uuid v1.3.1 diff --git a/go.sum b/go.sum index 9591b04d80..8da9b6abd9 100644 --- a/go.sum +++ b/go.sum @@ -1,8 +1,11 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1 h1:SEy2xmstIphdPwNBUi7uhvjyjhVKISfwjfOJmuy7kg4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.6.1/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/aws/aws-sdk-go v1.44.299 h1:HVD9lU4CAFHGxleMJp95FV/sRhtg7P4miHD1v88JAQk= @@ -38,8 +41,10 @@ 
github.com/couchbase/gomemcached v0.2.1 h1:lDONROGbklo8pOt4Sr4eV436PVEaKDr3o9gUl github.com/couchbase/gomemcached v0.2.1/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo= github.com/couchbase/goutils v0.1.2 h1:gWr8B6XNWPIhfalHNog3qQKfGiYyh4K4VhO3P2o9BCs= github.com/couchbase/goutils v0.1.2/go.mod h1:h89Ek/tiOxxqjz30nPPlwZdQbdB8BwgnuBxeoUe/ViE= -github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148 h1:9E3u0yA+be219iLLOjuYgagOfM7UqtZ0YIhMXysJVKs= -github.com/couchbase/sg-bucket v0.0.0-20231003103030-627c70e18148/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= +github.com/couchbase/sg-bucket v0.0.0-20231108134134-545ec7bf1a9e h1:IFv4HcdpvKFEaaszv6f1WcEbWmU276rFzOaJgarw5gw= +github.com/couchbase/sg-bucket v0.0.0-20231108134134-545ec7bf1a9e/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= +github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483 h1:K6y82On0A3coA+GwW+HGKIwpCpca6ZSvTAJwwTmzCrg= +github.com/couchbase/sg-bucket v0.0.0-20231116231254-16c1ad8b2483/go.mod h1:hy6J0RXx/Ry+5EiI8VVMetsVfBXQq5/djQLbvfRau0k= github.com/couchbase/tools-common/cloud v1.0.0 h1:SQZIccXoedbrThehc/r9BJbpi/JhwJ8X00PDjZ2gEBE= github.com/couchbase/tools-common/cloud v1.0.0/go.mod h1:6KVlRpbcnDWrvickUJ+xpqCWx1vgYYlEli/zL4xmZAg= github.com/couchbase/tools-common/fs v1.0.0 h1:HFA4xCF/r3BtZShFJUxzVvGuXtDkqGnaPzYJP3Kp1mw= @@ -57,8 +62,10 @@ github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259 h1:2T github.com/couchbaselabs/gocaves/client v0.0.0-20230404095311-05e3ba4f0259/go.mod h1:AVekAZwIY2stsJOMWLAS/0uA/+qdp7pjO8EHnl61QkY= github.com/couchbaselabs/gocbconnstr v1.0.5 h1:e0JokB5qbcz7rfnxEhNRTKz8q1svoRvDoZihsiwNigA= github.com/couchbaselabs/gocbconnstr v1.0.5/go.mod h1:KV3fnIKMi8/AzX0O9zOrO9rofEqrRF1d2rG7qqjxC7o= -github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6 h1:TeqaJ0zV0omrnvQfw4DF6o+UQQbFdBNPJVod1Y7ovQo= -github.com/couchbaselabs/rosmar v0.0.0-20231003104919-6d4a3e8a6db6/go.mod 
h1:+HMmQTjaINo51eSZFeCKreXYSIu6jbIp+EV9keoKl3E= +github.com/couchbaselabs/rosmar v0.0.0-20231108144220-c0c6c76bb267 h1:dIYPzphKBskYB0viAtWHX/nHOimFuxyVwK9cFA103eA= +github.com/couchbaselabs/rosmar v0.0.0-20231108144220-c0c6c76bb267/go.mod h1:AY2mDCIVElNv3rdOAyFeb7g8phFbv821FuMxX4S6MzI= +github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e h1:6DyLYnzHE4dMfuyz0UEWiBOB/PfUXrxRUy1A4478k6A= +github.com/couchbaselabs/rosmar v0.0.0-20231116232326-adb4806d011e/go.mod h1:+AjMZkAOGCeQRLjIBwehXKyWsNCPFrMKYz6lIaZ1idc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -97,6 +104,7 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y= github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= @@ -125,6 +133,7 @@ github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty 
v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -254,6 +263,7 @@ google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/readline.v1 v1.0.0-20160726135117-62c6fe619375/go.mod h1:lNEQeAhU009zbRxng+XOj5ITVgY24WcbNnQopyfKoYQ= @@ -265,6 +275,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/rest/adminapitest/admin_api_test.go b/rest/adminapitest/admin_api_test.go index f764acd17d..a79bddd32c 100644 --- a/rest/adminapitest/admin_api_test.go +++ b/rest/adminapitest/admin_api_test.go @@ -1472,7 +1472,7 @@ func TestCorruptDbConfigHandling(t *testing.T) { 
base.SetUpTestLogging(t, base.LevelInfo, base.KeyConfig) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to every 1 seconds @@ -1557,7 +1557,7 @@ func TestBadConfigInsertionToBucket(t *testing.T) { base.TestsRequireBootstrapConnection(t) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to every 1 seconds @@ -1608,11 +1608,11 @@ func TestMismatchedBucketNameOnDbConfigUpdate(t *testing.T) { base.TestsRequireBootstrapConnection(t) base.RequireNumTestBuckets(t, 2) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), + CustomTestBucket: base.GetTestBucket(t), PersistentConfig: true, MutateStartupConfig: func(config *rest.StartupConfig) { // configure the interval time to pick up new configs from the bucket to every 1 seconds @@ -1643,11 +1643,11 @@ func TestMultipleBucketWithBadDbConfigScenario1(t *testing.T) { base.TestsRequireBootstrapConnection(t) base.RequireNumTestBuckets(t, 3) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) - tb3 := base.GetPersistentTestBucket(t) + tb3 := base.GetTestBucket(t) defer tb3.Close(ctx) const groupID = "60ce5544-c368-4b08-b0ed-4ca3b37973f9" @@ -1722,9 +1722,9 @@ func TestMultipleBucketWithBadDbConfigScenario2(t *testing.T) 
{ base.RequireNumTestBuckets(t, 3) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) rt1 := rest.NewRestTester(t, &rest.RestTesterConfig{ @@ -1792,9 +1792,9 @@ func TestMultipleBucketWithBadDbConfigScenario3(t *testing.T) { base.TestsRequireBootstrapConnection(t) ctx := base.TestCtx(t) - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) defer tb1.Close(ctx) - tb2 := base.GetPersistentTestBucket(t) + tb2 := base.GetTestBucket(t) defer tb2.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ diff --git a/rest/adminapitest/collections_admin_api_test.go b/rest/adminapitest/collections_admin_api_test.go index 30eca1819d..92635b978f 100644 --- a/rest/adminapitest/collections_admin_api_test.go +++ b/rest/adminapitest/collections_admin_api_test.go @@ -179,7 +179,6 @@ func TestRequireResync(t *testing.T) { base.RequireNumTestDataStores(t, 2) base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) rtConfig := &rest.RestTesterConfig{ - CustomTestBucket: base.GetPersistentTestBucket(t), PersistentConfig: true, } diff --git a/rest/api.go b/rest/api.go index f013820a01..90b824a5f4 100644 --- a/rest/api.go +++ b/rest/api.go @@ -258,7 +258,7 @@ func (h *handler) handleFlush() error { name := h.db.Name config := h.server.GetDatabaseConfig(name) h.server.RemoveDatabase(h.ctx(), name) - err := bucket.CloseAndDelete() + err := bucket.CloseAndDelete(h.ctx()) _, err2 := h.server.AddDatabaseFromConfig(h.ctx(), config.DatabaseConfig) if err == nil { err = err2 diff --git a/rest/api_collections_test.go b/rest/api_collections_test.go index 6472f81dfb..9a7b532c68 100644 --- a/rest/api_collections_test.go +++ b/rest/api_collections_test.go @@ -267,7 +267,7 @@ func TestMultiCollectionChannelAccess(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) ctx := base.TestCtx(t) - tb := 
base.GetPersistentTestBucket(t) + tb := base.GetTestBucket(t) defer tb.Close(ctx) scopesConfig := GetCollectionsConfig(t, tb, 2) @@ -281,9 +281,8 @@ func TestMultiCollectionChannelAccess(t *testing.T) { scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - fmt.Println(scopesConfig) rtConfig := &RestTesterConfig{ - CustomTestBucket: tb.NoCloseClone(), + CustomTestBucket: tb, DatabaseConfig: &DatabaseConfig{DbConfig: DbConfig{ Scopes: scopesConfig, NumIndexReplicas: base.UintPtr(0), @@ -337,16 +336,21 @@ func TestMultiCollectionChannelAccess(t *testing.T) { RequireStatus(t, resp, http.StatusOK) // Add a new collection and update the db config - scopesConfig = GetCollectionsConfig(t, tb, 3) - dataStoreNames = GetDataStoreNamesFromScopesConfig(scopesConfig) + scopesConfig3Collections := GetCollectionsConfig(t, tb, 3) + dataStoreNames = GetDataStoreNamesFromScopesConfig(scopesConfig3Collections) collection3 := dataStoreNames[2].CollectionName() - scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection3] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfigString, err := json.Marshal(scopesConfig) + scopesConfig3Collections[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfig3Collections[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfig3Collections[scope].Collections[collection3] = &CollectionConfig{SyncFn: &c1SyncFunction} + scopesConfigString, err := json.Marshal(scopesConfig3Collections) require.NoError(t, err) + scopesConfig2Collections := GetCollectionsConfig(t, tb, 2) + + scopesConfig2Collections[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} + 
scopesConfig2Collections[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} + resp = rt.SendAdminRequest("PUT", "/db/_config", fmt.Sprintf( `{"bucket": "%s", "num_index_replicas": 0, "enable_shared_bucket_access": %t, "scopes":%s}`, tb.GetName(), base.TestUseXattrs(), string(scopesConfigString))) @@ -378,11 +382,7 @@ func TestMultiCollectionChannelAccess(t *testing.T) { RequireStatus(t, resp, http.StatusOK) // Remove collection and update the db config - scopesConfig = GetCollectionsConfig(t, tb, 2) - - scopesConfig[scope].Collections[collection1] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfig[scope].Collections[collection2] = &CollectionConfig{SyncFn: &c1SyncFunction} - scopesConfigString, err = json.Marshal(scopesConfig) + scopesConfigString, err = json.Marshal(scopesConfig2Collections) require.NoError(t, err) resp = rt.SendAdminRequest("PUT", "/db/_config", fmt.Sprintf( diff --git a/rest/importtest/collections_import_test.go b/rest/importtest/collections_import_test.go index 79e670efb2..b872f9f4c9 100644 --- a/rest/importtest/collections_import_test.go +++ b/rest/importtest/collections_import_test.go @@ -28,7 +28,7 @@ func TestMultiCollectionImportFilter(t *testing.T) { base.RequireNumTestDataStores(t, 3) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) scopesConfig := rest.GetCollectionsConfig(t, testBucket, 2) @@ -250,7 +250,7 @@ func TestMultiCollectionImportDynamicAddCollection(t *testing.T) { base.RequireNumTestDataStores(t, 2) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) rtConfig := &rest.RestTesterConfig{ @@ -346,7 +346,7 @@ func TestMultiCollectionImportRemoveCollection(t *testing.T) { base.RequireNumTestDataStores(t, numCollections) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := 
base.GetTestBucket(t) defer testBucket.Close(ctx) rtConfig := &rest.RestTesterConfig{ diff --git a/rest/importtest/import_test.go b/rest/importtest/import_test.go index 0fa9f61b82..3a7d0f3d55 100644 --- a/rest/importtest/import_test.go +++ b/rest/importtest/import_test.go @@ -2735,7 +2735,7 @@ func TestImportRollback(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyImport, base.KeyDCP) ctx := base.TestCtx(t) - bucket := base.GetPersistentTestBucket(t) + bucket := base.GetTestBucket(t) defer bucket.Close(ctx) rt := rest.NewRestTester(t, &rest.RestTesterConfig{ diff --git a/rest/server_context_test.go b/rest/server_context_test.go index 793f328f6e..9afbc89789 100644 --- a/rest/server_context_test.go +++ b/rest/server_context_test.go @@ -840,11 +840,7 @@ func TestOfflineDatabaseStartup(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - ctx := base.TestCtx(t) - bucket := base.GetPersistentTestBucket(t) - defer bucket.Close(ctx) rt := NewRestTester(t, &RestTesterConfig{ - CustomTestBucket: bucket.NoCloseClone(), DatabaseConfig: &DatabaseConfig{ DbConfig: DbConfig{ StartOffline: base.BoolPtr(true), diff --git a/rest/sync_fn_test.go b/rest/sync_fn_test.go index f85c23a5b1..e294b6e324 100644 --- a/rest/sync_fn_test.go +++ b/rest/sync_fn_test.go @@ -898,20 +898,9 @@ func TestResyncRegenerateSequences(t *testing.T) { base.SetUpTestLogging(t, base.LevelInfo, base.KeyAll) - var testBucket *base.TestBucket - - if base.UnitTestUrlIsWalrus() { - var closeFn func() - testBucket, closeFn = base.GetPersistentWalrusBucket(t) - defer closeFn() - } else { - testBucket = base.GetTestBucket(t) - } - rt := NewRestTester(t, &RestTesterConfig{ - SyncFn: syncFn, - CustomTestBucket: testBucket, + SyncFn: syncFn, }, ) defer rt.Close() diff --git a/rest/upgradetest/remove_collection_test.go b/rest/upgradetest/remove_collection_test.go index 23141b8d7e..4072586208 100644 --- a/rest/upgradetest/remove_collection_test.go +++ 
b/rest/upgradetest/remove_collection_test.go @@ -27,7 +27,7 @@ func TestRemoveCollection(t *testing.T) { base.TestRequiresCollections(t) base.RequireNumTestBuckets(t, 2) numCollections := 2 - bucket := base.GetPersistentTestBucket(t) + bucket := base.GetTestBucket(t) defer bucket.Close(base.TestCtx(t)) base.RequireNumTestDataStores(t, numCollections) rtConfig := &rest.RestTesterConfig{ diff --git a/rest/upgradetest/upgrade_registry_test.go b/rest/upgradetest/upgrade_registry_test.go index 880e3caad8..f9142fb5ed 100644 --- a/rest/upgradetest/upgrade_registry_test.go +++ b/rest/upgradetest/upgrade_registry_test.go @@ -189,7 +189,7 @@ func getDbConfigFromLegacyConfig(rt *rest.RestTester) string { } func TestLegacyMetadataID(t *testing.T) { - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) // Create a non-persistent rest tester. Standard RestTester // creates a database 'db' targeting the default collection (when !TestUseNamedCollections) legacyRT := rest.NewRestTesterDefaultCollection(t, &rest.RestTesterConfig{ @@ -254,7 +254,7 @@ func TestMetadataIDRenameDatabase(t *testing.T) { // Verifies that matching metadataIDs are computed if two config groups for the same database are upgraded func TestMetadataIDWithConfigGroups(t *testing.T) { - tb1 := base.GetPersistentTestBucket(t) + tb1 := base.GetTestBucket(t) // Create a non-persistent rest tester. Standard RestTester // creates a database 'db' targeting the default collection for legacy config. 
legacyRT := rest.NewRestTesterDefaultCollection(t, &rest.RestTesterConfig{ diff --git a/rest/user_api_test.go b/rest/user_api_test.go index fc97dbbcdf..9f7c5544fd 100644 --- a/rest/user_api_test.go +++ b/rest/user_api_test.go @@ -1530,7 +1530,7 @@ func TestGetUserCollectionAccess(t *testing.T) { base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) ctx := base.TestCtx(t) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) defer testBucket.Close(ctx) scopesConfig := GetCollectionsConfig(t, testBucket, 2) @@ -1616,7 +1616,7 @@ func TestGetUserCollectionAccess(t *testing.T) { func TestPutUserCollectionAccess(t *testing.T) { base.RequireNumTestDataStores(t, 2) base.SetUpTestLogging(t, base.LevelDebug, base.KeyAll) - testBucket := base.GetPersistentTestBucket(t) + testBucket := base.GetTestBucket(t) scopesConfig := GetCollectionsConfig(t, testBucket, 2) rtConfig := &RestTesterConfig{ diff --git a/rest/utilities_testing.go b/rest/utilities_testing.go index 5b577a7ec4..078284cca8 100644 --- a/rest/utilities_testing.go +++ b/rest/utilities_testing.go @@ -166,11 +166,7 @@ func (rt *RestTester) Bucket() base.Bucket { // If we have a TestBucket defined on the RestTesterConfig, use that instead of requesting a new one. 
testBucket := rt.RestTesterConfig.CustomTestBucket if testBucket == nil { - if rt.PersistentConfig { - testBucket = base.GetPersistentTestBucket(rt.TB) - } else { - testBucket = base.GetTestBucket(rt.TB) - } + testBucket = base.GetTestBucket(rt.TB) if rt.leakyBucketConfig != nil { leakyConfig := *rt.leakyBucketConfig // Ignore closures to avoid double closing panics diff --git a/rest/utilities_testing_test.go b/rest/utilities_testing_test.go index f17c49192d..b41d471df6 100644 --- a/rest/utilities_testing_test.go +++ b/rest/utilities_testing_test.go @@ -261,7 +261,7 @@ func TestRestTesterTemplateMultipleDatabases(t *testing.T) { } base.RequireNumTestBuckets(t, 2) ctx := base.TestCtx(t) - bucket2 := base.GetPersistentTestBucket(t) + bucket2 := base.GetTestBucket(t) defer bucket2.Close(ctx) dbConfig = DbConfig{ Scopes: GetCollectionsConfig(rt.TB, bucket2, numCollections),