From 27ded38c22bffa309c2b074b8dcdb701fb2de957 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 24 Nov 2023 14:52:08 +0000 Subject: [PATCH 1/4] add MoveBucket to support moving a sub-bucket from one bucket to another bucket Signed-off-by: Benjamin Wang --- bucket.go | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ tx.go | 18 ++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/bucket.go b/bucket.go index d9b384a2a..c1e8d126c 100644 --- a/bucket.go +++ b/bucket.go @@ -322,6 +322,59 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(key, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return fmt.Errorf("key %q isn't a bucket in the source bucket: %w", key, errors.ErrIncompatibleValue) + } + + // Do nothing (return true directly) if the source bucket and the + // destination bucket are actually the same bucket. + if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + return nil + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(key) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(key, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + return fmt.Errorf("key %q already exists in the target bucket: %w", key, errors.ErrIncompatibleValue) + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(key)) + c.node().del(key) + + // add te sub-bucket to the destination bucket + newKey := cloneBytes(key) + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. diff --git a/tx.go b/tx.go index 8e624e7b2..81913b0fe 100644 --- a/tx.go +++ b/tx.go @@ -127,6 +127,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. 
// If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. From ac355dec240ce753edc453e455e1c70ffac83a2b Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 12 Dec 2023 12:49:44 +0100 Subject: [PATCH 2/4] add MoveSubBucket test Signed-off-by: Mustafa Elbehery --- bucket.go | 5 +- errors/errors.go | 6 +- movebucket_test.go | 291 +++++++++++++++++++++++++++++++++++++++++++++ utils_test.go | 46 +++++++ 4 files changed, 345 insertions(+), 3 deletions(-) create mode 100644 movebucket_test.go create mode 100644 utils_test.go diff --git a/bucket.go b/bucket.go index c1e8d126c..78a68f548 100644 --- a/bucket.go +++ b/bucket.go @@ -326,7 +326,8 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { // Returns an error if // 1. the sub-bucket cannot be found in the source bucket; // 2. or the key already exists in the destination bucket; -// 3. the key represents a non-bucket value. +// 3. or the key represents a non-bucket value; +// 4. the source and destination buckets are the same. func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed @@ -348,7 +349,7 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { // Do nothing (return true directly) if the source bucket and the // destination bucket are actually the same bucket. if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { - return nil + return fmt.Errorf("source bucket %s and target bucket %s are the same: %w", b.String(), dstBucket.String(), errors.ErrSameBuckets) } // check whether the key already exists in the destination bucket diff --git a/errors/errors.go b/errors/errors.go index 9598cbd8a..5709bcf2c 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -69,8 +69,12 @@ var ( // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. ErrValueTooLarge = errors.New("value too large") - // ErrIncompatibleValue is returned when trying create or delete a bucket + // ErrIncompatibleValue is returned when trying to create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. 
+ ErrSameBuckets = errors.New("the source and target are the same bucket") ) diff --git a/movebucket_test.go b/movebucket_test.go new file mode 100644 index 000000000..21789c40a --- /dev/null +++ b/movebucket_test.go @@ -0,0 +1,291 @@ +package bbolt_test + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "os" + "testing" + + "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/btesting" + + "github.com/stretchr/testify/require" +) + +func TestTx_MoveBucket(t *testing.T) { + testCases := []struct { + name string + srcBucketPath []string + dstBucketPath []string + bucketToMove string + incompatibleKeyInSrc bool + incompatibleKeyInDst bool + parentSrc bool + parentDst bool + expActErr error + }{ + { + name: "happy path", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "bucketToMove not exist in srcBucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: false, + parentDst: false, + expActErr: errors.ErrBucketNotFound, + }, + { + name: "bucketToMove exist in dstBucket", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2", "sb3ToMove"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: true, + expActErr: errors.ErrBucketExists, + }, + { + name: "bucketToMove key exist in srcBucket but no subBucket value", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: true, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: errors.ErrIncompatibleValue, + }, + { + name: "bucketToMove key exist in dstBucket but no subBucket value", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: true, + parentSrc: true, + parentDst: true, + expActErr: errors.ErrIncompatibleValue, + }, + { + name: "srcBucket is rootBucket", + srcBucketPath: []string{"", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "dstBucket is rootBucket", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{""}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "srcBucket is rootBucket and dstBucket is rootBucket", + srcBucketPath: []string{"", "sb3ToMove"}, + dstBucketPath: []string{""}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: false, + parentDst: false, + expActErr: errors.ErrSameBuckets, + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(*testing.T) { + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) + + dumpBucketBeforeMoving := tempfile() + dumpBucketAfterMoving := tempfile() + + // arrange + if err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := openBuckets(t, tx, tc.incompatibleKeyInSrc, true, 
false, tc.srcBucketPath...) + dstBucket := openBuckets(t, tx, tc.incompatibleKeyInDst, true, false, tc.dstBucketPath...) + + if tc.incompatibleKeyInSrc { + if pErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { + t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", srcBucket, pErr) + } + } + + if tc.incompatibleKeyInDst { + if pErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { + t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", dstBucket, pErr) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // act + if err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) + dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) + + var bucketToMove *bbolt.Bucket + if srcBucket != nil { + bucketToMove = srcBucket.Bucket([]byte(tc.bucketToMove)) + } else { + bucketToMove = tx.Bucket([]byte(tc.bucketToMove)) + } + + if tc.expActErr == nil && bucketToMove != nil { + if wErr := dumpBucket([]byte(tc.bucketToMove), bucketToMove, dumpBucketBeforeMoving); wErr != nil { + t.Fatalf("error dumping bucket %v to file %v: %v", bucketToMove.String(), dumpBucketBeforeMoving, wErr) + } + } + + mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket) + require.ErrorIs(t, mErr, tc.expActErr) + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // skip assertion if failure expected + if tc.expActErr != nil { + return + } + + // assert + if err := db.Update(func(tx *bbolt.Tx) error { + var movedBucket *bbolt.Bucket + srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) + + if srcBucket != nil { + if movedBucket = srcBucket.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { + t.Fatalf("expected childBucket %v to be moved from srcBucket %v", tc.bucketToMove, srcBucket) + } + } else { + if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { + t.Fatalf("expected childBucket %v to be moved from root bucket %v", tc.bucketToMove, "root bucket") + } + } + + dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) 
+ if dstBucket != nil { + if movedBucket = dstBucket.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { + t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, dstBucket) + } + } else { + if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { + t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, "root bucket") + } + } + + wErr := dumpBucket([]byte(tc.bucketToMove), movedBucket, dumpBucketAfterMoving) + if wErr != nil { + t.Fatalf("error dumping bucket %v to file %v", movedBucket.String(), dumpBucketAfterMoving) + } + + beforeBucket := readBucketFromFile(t, dumpBucketBeforeMoving) + afterBucket := readBucketFromFile(t, dumpBucketAfterMoving) + + if !bytes.Equal(beforeBucket, afterBucket) { + t.Fatalf("bucket's content before moving is different than after moving") + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + }) + } +} + +func openBuckets(t testing.TB, tx *bbolt.Tx, incompatibleKey bool, init bool, parent bool, paths ...string) *bbolt.Bucket { + t.Helper() + + var bk *bbolt.Bucket + var err error + + idx := len(paths) - 1 + for i, key := range paths { + if len(key) == 0 { + if !init { + break + } + continue + } + if (incompatibleKey && i == idx) || (parent && i == idx) { + continue + } + if bk == nil { + bk, err = tx.CreateBucketIfNotExists([]byte(key)) + } else { + bk, err = bk.CreateBucketIfNotExists([]byte(key)) + } + if err != nil { + t.Fatalf("error creating bucket %v: %v", key, err) + } + if init { + insertRandKeysValuesBucket(t, bk, rand.Intn(4096)) + } + } + + return bk +} + +func readBucketFromFile(t testing.TB, tmpFile string) []byte { + data, err := os.ReadFile(tmpFile) + if err != nil { + t.Fatalf("error reading temp file %v", tmpFile) + } + + return data +} + +func insertRandKeysValuesBucket(t testing.TB, bk *bbolt.Bucket, n int) { + var min, max = 1, 1024 + + for i := 0; i < n; i++ { + // generate rand key/value length + keyLength := rand.Intn(max-min) + min + valLength := rand.Intn(max-min) + min + + keyData := make([]byte, keyLength) + valData := make([]byte, valLength) + + _, err := crand.Read(keyData) + require.NoError(t, err) + + _, err = crand.Read(valData) + require.NoError(t, err) + + err = bk.Put(keyData, valData) + require.NoError(t, err) + } +} diff --git a/utils_test.go b/utils_test.go new file mode 100644 index 000000000..867109493 --- /dev/null +++ b/utils_test.go @@ -0,0 +1,46 @@ +package bbolt_test + +import ( + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" +) + +// `dumpBucket` dumps all the data, including both key/value data +// and child buckets, from the source bucket into the target db file. 
+func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string) error { + common.Assert(len(srcBucketName) != 0, "source bucket name can't be empty") + common.Assert(srcBucket != nil, "the source bucket can't be nil") + common.Assert(len(dstFilename) != 0, "the target file path can't be empty") + + dstDB, err := bolt.Open(dstFilename, 0600, nil) + if err != nil { + return err + } + + return dstDB.Update(func(tx *bolt.Tx) error { + dstBucket, err := tx.CreateBucket(srcBucketName) + if err != nil { + return err + } + return cloneBucket(srcBucket, dstBucket) + }) +} + +func cloneBucket(src *bolt.Bucket, dst *bolt.Bucket) error { + return src.ForEach(func(k, v []byte) error { + if v == nil { + srcChild := src.Bucket(k) + dstChild, err := dst.CreateBucket(k) + if err != nil { + return err + } + if err = dstChild.SetSequence(srcChild.Sequence()); err != nil { + return err + } + + return cloneBucket(srcChild, dstChild) + } + + return dst.Put(k, v) + }) +} From 0bd26bc48ce36b29006eb94983e09f7a9f1aa03d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 2 Jan 2024 13:35:38 +0000 Subject: [PATCH 3/4] Refactor test case TestTx_MoveBucket and add log for MoveBucket Signed-off-by: Benjamin Wang --- movebucket_test.go | 376 ++++++++++++++++++++++----------------------- utils_test.go | 1 + 2 files changed, 181 insertions(+), 196 deletions(-) diff --git a/movebucket_test.go b/movebucket_test.go index 21789c40a..b89b9602f 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -1,10 +1,10 @@ package bbolt_test import ( - "bytes" crand "crypto/rand" "math/rand" "os" + "path/filepath" "testing" "go.etcd.io/bbolt" @@ -16,259 +16,243 @@ import ( func TestTx_MoveBucket(t *testing.T) { testCases := []struct { - name string - srcBucketPath []string - dstBucketPath []string - bucketToMove string - incompatibleKeyInSrc bool - incompatibleKeyInDst bool - parentSrc bool - parentDst bool - expActErr error + name string + srcBucketPath []string + dstBucketPath []string + bucketToMove string + bucketExistInSrc bool + bucketExistInDst bool + hasIncompatibleKeyInSrc bool + hasIncompatibleKeyInDst bool + expectedErr error }{ + // normal cases { - name: "happy path", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "normal case", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: "bucketToMove not exist in srcBucket", - srcBucketPath: []string{"sb1", "sb2"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: false, - parentDst: false, - expActErr: errors.ErrBucketNotFound, + name: "the source and target bucket share the same grandparent", + srcBucketPath: []string{"grandparent", "sb2"}, + dstBucketPath: []string{"grandparent", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: "bucketToMove exist in dstBucket", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2", "sb3ToMove"}, - bucketToMove: "sb3ToMove", - 
incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: true, - expActErr: errors.ErrBucketExists, + name: "bucketToMove is a top level bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: "bucketToMove key exist in srcBucket but no subBucket value", - srcBucketPath: []string{"sb1", "sb2"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: true, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: errors.ErrIncompatibleValue, + name: "convert bucketToMove to a top level bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, + // negative cases { - name: "bucketToMove key exist in dstBucket but no subBucket value", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: true, - parentSrc: true, - parentDst: true, - expActErr: errors.ErrIncompatibleValue, + name: "bucketToMove not exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketNotFound, }, { - name: "srcBucket is rootBucket", - srcBucketPath: []string{"", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "bucketToMove exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: true, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketExists, }, { - name: "dstBucket is rootBucket", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{""}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "incompatible key exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: true, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrIncompatibleValue, }, { - name: "srcBucket is rootBucket and dstBucket is rootBucket", - srcBucketPath: []string{"", "sb3ToMove"}, - dstBucketPath: []string{""}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: false, - parentDst: false, - expActErr: errors.ErrSameBuckets, + name: "incompatible key exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: true, + expectedErr: 
errors.ErrIncompatibleValue, + }, + { + name: "the source and target are the same bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"sb1", "sb2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrSameBuckets, + }, + { + name: "both the source and target are the root bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrSameBuckets, }, } for _, tc := range testCases { t.Run(tc.name, func(*testing.T) { - db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) - dumpBucketBeforeMoving := tempfile() - dumpBucketAfterMoving := tempfile() + dumpBucketBeforeMoving := filepath.Join(t.TempDir(), "dbBeforeMove") + dumpBucketAfterMoving := filepath.Join(t.TempDir(), "dbAfterMove") - // arrange - if err := db.Update(func(tx *bbolt.Tx) error { - srcBucket := openBuckets(t, tx, tc.incompatibleKeyInSrc, true, false, tc.srcBucketPath...) - dstBucket := openBuckets(t, tx, tc.incompatibleKeyInDst, true, false, tc.dstBucketPath...) + t.Log("Creating sample db and populate some data") + err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) - if tc.incompatibleKeyInSrc { - if pErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { - t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", srcBucket, pErr) - } + if tc.bucketExistInSrc { + _ = createBucketAndPopulateData(t, tx, srcBucket, tc.bucketToMove) } - if tc.incompatibleKeyInDst { - if pErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { - t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", dstBucket, pErr) - } + if tc.bucketExistInDst { + _ = createBucketAndPopulateData(t, tx, dstBucket, tc.bucketToMove) } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // act - if err := db.Update(func(tx *bbolt.Tx) error { - srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) - dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) + if tc.hasIncompatibleKeyInSrc { + putErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) + } - var bucketToMove *bbolt.Bucket - if srcBucket != nil { - bucketToMove = srcBucket.Bucket([]byte(tc.bucketToMove)) - } else { - bucketToMove = tx.Bucket([]byte(tc.bucketToMove)) + if tc.hasIncompatibleKeyInDst { + putErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) } - if tc.expActErr == nil && bucketToMove != nil { - if wErr := dumpBucket([]byte(tc.bucketToMove), bucketToMove, dumpBucketBeforeMoving); wErr != nil { - t.Fatalf("error dumping bucket %v to file %v: %v", bucketToMove.String(), dumpBucketBeforeMoving, wErr) - } + return nil + }) + require.NoError(t, err) + + t.Log("Moving bucket") + err = db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) 
+ + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s before moving it", dumpBucketBeforeMoving) + bk := openBucket(tx, srcBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketBeforeMoving) + require.NoError(t, dumpErr) } mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket) - require.ErrorIs(t, mErr, tc.expActErr) + require.Equal(t, tc.expectedErr, mErr) + + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s after moving it", dumpBucketAfterMoving) + bk := openBucket(tx, dstBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketAfterMoving) + require.NoError(t, dumpErr) + } return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() + }) + require.NoError(t, err) // skip assertion if failure expected - if tc.expActErr != nil { + if tc.expectedErr != nil { return } - // assert - if err := db.Update(func(tx *bbolt.Tx) error { - var movedBucket *bbolt.Bucket - srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) - - if srcBucket != nil { - if movedBucket = srcBucket.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { - t.Fatalf("expected childBucket %v to be moved from srcBucket %v", tc.bucketToMove, srcBucket) - } - } else { - if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { - t.Fatalf("expected childBucket %v to be moved from root bucket %v", tc.bucketToMove, "root bucket") - } - } - - dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) - if dstBucket != nil { - if movedBucket = dstBucket.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { - t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, dstBucket) - } - } else { - if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { - t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, "root bucket") - } - } - - wErr := dumpBucket([]byte(tc.bucketToMove), movedBucket, dumpBucketAfterMoving) - if wErr != nil { - t.Fatalf("error dumping bucket %v to file %v", movedBucket.String(), dumpBucketAfterMoving) - } - - beforeBucket := readBucketFromFile(t, dumpBucketBeforeMoving) - afterBucket := readBucketFromFile(t, dumpBucketAfterMoving) - - if !bytes.Equal(beforeBucket, afterBucket) { - t.Fatalf("bucket's content before moving is different than after moving") - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() + t.Log("Verifying the bucket should be identical before and after being moved") + dataBeforeMove, err := os.ReadFile(dumpBucketBeforeMoving) + require.NoError(t, err) + dataAfterMove, err := os.ReadFile(dumpBucketAfterMoving) + require.NoError(t, err) + require.Equal(t, dataBeforeMove, dataAfterMove) }) } } -func openBuckets(t testing.TB, tx *bbolt.Tx, incompatibleKey bool, init bool, parent bool, paths ...string) *bbolt.Bucket { - t.Helper() - +// prepareBuckets opens the bucket chain. For each bucket in the chain, +// open it if existed, otherwise create it and populate sample data. 
+func prepareBuckets(t testing.TB, tx *bbolt.Tx, buckets ...string) *bbolt.Bucket { var bk *bbolt.Bucket - var err error - idx := len(paths) - 1 - for i, key := range paths { - if len(key) == 0 { - if !init { - break - } - continue - } - if (incompatibleKey && i == idx) || (parent && i == idx) { - continue - } - if bk == nil { - bk, err = tx.CreateBucketIfNotExists([]byte(key)) + for _, key := range buckets { + if childBucket := openBucket(tx, bk, key); childBucket == nil { + bk = createBucketAndPopulateData(t, tx, bk, key) } else { - bk, err = bk.CreateBucketIfNotExists([]byte(key)) - } - if err != nil { - t.Fatalf("error creating bucket %v: %v", key, err) - } - if init { - insertRandKeysValuesBucket(t, bk, rand.Intn(4096)) + bk = childBucket } } - return bk } -func readBucketFromFile(t testing.TB, tmpFile string) []byte { - data, err := os.ReadFile(tmpFile) - if err != nil { - t.Fatalf("error reading temp file %v", tmpFile) +func openBucket(tx *bbolt.Tx, bk *bbolt.Bucket, bucketToOpen string) *bbolt.Bucket { + if bk == nil { + return tx.Bucket([]byte(bucketToOpen)) + } + return bk.Bucket([]byte(bucketToOpen)) +} + +func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, bucketName string) *bbolt.Bucket { + if bk == nil { + newBucket, err := tx.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) + return newBucket } - return data + newBucket, err := bk.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, bk, rand.Intn(4096)) + return newBucket } -func insertRandKeysValuesBucket(t testing.TB, bk *bbolt.Bucket, n int) { +func populateSampleDataInBucket(t testing.TB, bk *bbolt.Bucket, n int) { var min, max = 1, 1024 for i := 0; i < n; i++ { diff --git a/utils_test.go b/utils_test.go index 867109493..1a4f23939 100644 --- a/utils_test.go +++ b/utils_test.go @@ -16,6 +16,7 @@ func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string if err != nil { return err } + defer dstDB.Close() return dstDB.Update(func(tx *bolt.Tx) error { dstBucket, err := tx.CreateBucket(srcBucketName) From 886eccbdf505b939898b39aa1889c93cede58dc0 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 2 Jan 2024 14:11:09 +0000 Subject: [PATCH 4/4] Add log into MoveBucket and clone the key Signed-off-by: Benjamin Wang --- bucket.go | 48 ++++++++++++++++++++++++++++++---------------- movebucket_test.go | 2 +- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/bucket.go b/bucket.go index 78a68f548..9fbc9766c 100644 --- a/bucket.go +++ b/bucket.go @@ -285,19 +285,21 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { + if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) + child := b.Bucket(newKey) err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) @@ -309,7 +311,7 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { } // Remove cached copy. 
- delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -317,7 +319,7 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } @@ -328,48 +330,62 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { // 2. or the key already exists in the destination bucket; // 3. or the key represents a non-bucket value; // 4. the source and destination buckets are the same. -func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Moving bucket %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", string(key), err) + } else { + lg.Debugf("Moving bucket %q successfully", string(key)) + } + }() + if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed } else if !dstBucket.Writable() { return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, v, flags := c.seek(key) + k, v, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { + if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { - return fmt.Errorf("key %q isn't a bucket in the source bucket: %w", key, errors.ErrIncompatibleValue) + lg.Errorf("An incompatible key %s exists in the source bucket", string(newKey)) + return errors.ErrIncompatibleValue } // Do nothing (return true directly) if the source bucket and the // destination bucket are actually the same bucket. if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { - return fmt.Errorf("source bucket %s and target bucket %s are the same: %w", b.String(), dstBucket.String(), errors.ErrSameBuckets) + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b.String(), dstBucket.String()) + return errors.ErrSameBuckets } // check whether the key already exists in the destination bucket curDst := dstBucket.Cursor() - k, _, flags = curDst.seek(key) + k, _, flags = curDst.seek(newKey) // Return an error if there is an existing key in the destination bucket. 
- if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & common.BucketLeafFlag) != 0 { return errors.ErrBucketExists } - return fmt.Errorf("key %q already exists in the target bucket: %w", key, errors.ErrIncompatibleValue) + lg.Errorf("An incompatible key %s exists in the target bucket", string(newKey)) + return errors.ErrIncompatibleValue } // remove the sub-bucket from the source bucket - delete(b.buckets, string(key)) - c.node().del(key) + delete(b.buckets, string(newKey)) + c.node().del(newKey) // add te sub-bucket to the destination bucket - newKey := cloneBytes(key) newValue := cloneBytes(v) curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) diff --git a/movebucket_test.go b/movebucket_test.go index b89b9602f..0b60d95bd 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -248,7 +248,7 @@ func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, b newBucket, err := bk.CreateBucket([]byte(bucketName)) require.NoError(t, err, "failed to create bucket %s", bucketName) - populateSampleDataInBucket(t, bk, rand.Intn(4096)) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) return newBucket }
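
Usage sketch (not part of the patch series): the snippet below shows how the API added above could be exercised once all four patches are applied. The temporary database file and the bucket names ("artifacts", "archive", "reports") are illustrative assumptions only; the Tx.MoveBucket signature and the error sentinels are taken from the diffs in this series.

package bbolt_test

import (
	"errors"
	"fmt"
	"log"
	"os"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

// ExampleTx_MoveBucket moves the sub-bucket "reports" from "artifacts" to
// "archive" inside a single writable transaction, then shows the error
// returned when the same move is attempted again.
func ExampleTx_MoveBucket() {
	// Use a throwaway database file; the name is an assumption for the example.
	f, err := os.CreateTemp("", "bbolt-movebucket-*.db")
	if err != nil {
		log.Fatal(err)
	}
	path := f.Name()
	f.Close()
	defer os.Remove(path)

	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Update(func(tx *bolt.Tx) error {
		// Arrange: a source bucket holding the sub-bucket to move, and an
		// empty destination bucket.
		src, err := tx.CreateBucketIfNotExists([]byte("artifacts"))
		if err != nil {
			return err
		}
		if _, err := src.CreateBucketIfNotExists([]byte("reports")); err != nil {
			return err
		}
		dst, err := tx.CreateBucketIfNotExists([]byte("archive"))
		if err != nil {
			return err
		}

		// Move "reports" from src to dst. Passing nil for src or dst would
		// use the root bucket instead, as documented on Tx.MoveBucket.
		if err := tx.MoveBucket([]byte("reports"), src, dst); err != nil {
			return err
		}

		// A second attempt fails: the sub-bucket no longer exists in src.
		if err := tx.MoveBucket([]byte("reports"), src, dst); !errors.Is(err, berrors.ErrBucketNotFound) {
			return fmt.Errorf("expected ErrBucketNotFound, got %v", err)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	fmt.Println("moved")
	// Output: moved
}

The same move can be expressed directly on the buckets as src.MoveBucket([]byte("reports"), dst); Tx.MoveBucket is the convenience wrapper that substitutes the root bucket when src or dst is nil.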