
Commit

add MoveSubBucket test
Signed-off-by: Mustafa Elbehery <melbeher@redhat.com>
Elbehery committed Dec 28, 2023
1 parent 3179540 commit 33bcf3b
Showing 4 changed files with 370 additions and 3 deletions.
5 changes: 3 additions & 2 deletions bucket.go
@@ -297,7 +297,8 @@ func (b *Bucket) DeleteBucket(key []byte) error {
 // Returns an error if
 // 1. the sub-bucket cannot be found in the source bucket;
 // 2. or the key already exists in the destination bucket;
-// 3. the key represents a non-bucket value.
+// 3. or the key represents a non-bucket value;
+// 4. the source and destination buckets are the same.
 func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error {
 	if b.tx.db == nil || dstBucket.tx.db == nil {
 		return errors.ErrTxClosed
@@ -319,7 +320,7 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error {
 	// Do nothing (return true directly) if the source bucket and the
 	// destination bucket are actually the same bucket.
 	if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) {
-		return nil
+		return fmt.Errorf("source bucket %s and target bucket %s are the same: %w", b.String(), dstBucket.String(), errors.ErrSameBuckets)
 	}
 
 	// check whether the key already exists in the destination bucket
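
For orientation, here is a minimal usage sketch of the Bucket.MoveBucket API whose documentation changes above. It is not part of this commit: the database path and the bucket names ("src", "dst", "child") are illustrative, and error handling is reduced to log.Fatal.

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Hypothetical database file; any writable path works.
	db, err := bolt.Open("move_example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Arrange: a source bucket holding a "child" sub-bucket, plus an empty destination bucket.
	if err := db.Update(func(tx *bolt.Tx) error {
		src, err := tx.CreateBucketIfNotExists([]byte("src"))
		if err != nil {
			return err
		}
		if _, err := src.CreateBucketIfNotExists([]byte("child")); err != nil {
			return err
		}
		_, err = tx.CreateBucketIfNotExists([]byte("dst"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Act: move the "child" sub-bucket from "src" into "dst" in a second write transaction.
	// With this change, passing the same bucket as source and destination now surfaces an
	// error wrapping errors.ErrSameBuckets instead of silently returning nil.
	if err := db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("src")).MoveBucket([]byte("child"), tx.Bucket([]byte("dst")))
	}); err != nil {
		log.Fatal(err)
	}
}
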
6 changes: 5 additions & 1 deletion errors/errors.go
@@ -69,8 +69,12 @@ var (
 	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
 	ErrValueTooLarge = errors.New("value too large")
 
-	// ErrIncompatibleValue is returned when trying create or delete a bucket
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
 	// on an existing non-bucket key or when trying to create or delete a
 	// non-bucket key on an existing bucket key.
 	ErrIncompatibleValue = errors.New("incompatible value")
+
+	// ErrSameBuckets is returned when trying to move a sub-bucket between
+	// source and target buckets, while source and target buckets are the same.
+	ErrSameBuckets = errors.New("the source and target are the same bucket")
 )
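
Because MoveBucket wraps the sentinel with the %w verb (see the bucket.go hunk above), callers can still detect it with errors.Is. The helper below is a hypothetical sketch, not part of this commit; the function name moveChild, the package name, and the decision to treat a same-bucket move as a no-op are assumptions.

package example

import (
	"errors"

	bolt "go.etcd.io/bbolt"
	berrors "go.etcd.io/bbolt/errors"
)

// moveChild moves the sub-bucket stored at key from src to dst and maps the
// sentinel errors documented above onto caller-side decisions.
func moveChild(src, dst *bolt.Bucket, key []byte) error {
	err := src.MoveBucket(key, dst)
	switch {
	case err == nil:
		return nil
	case errors.Is(err, berrors.ErrSameBuckets):
		// Source and destination are the same bucket; treat the move as a no-op.
		return nil
	case errors.Is(err, berrors.ErrBucketNotFound):
		// key is not a sub-bucket of src.
		return err
	case errors.Is(err, berrors.ErrBucketExists):
		// A sub-bucket with the same key already exists in dst.
		return err
	case errors.Is(err, berrors.ErrIncompatibleValue):
		// key holds a plain value rather than a bucket, in src or in dst.
		return err
	default:
		return err
	}
}
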
316 changes: 316 additions & 0 deletions movebucket_test.go
@@ -0,0 +1,316 @@
package bbolt_test

import (
"bytes"
crand "crypto/rand"
"math/rand"
"os"
"path/filepath"
"strings"
"testing"

"go.etcd.io/bbolt"
"go.etcd.io/bbolt/errors"
"go.etcd.io/bbolt/internal/btesting"

"github.com/stretchr/testify/require"
)

func TestTx_MoveBucket(t *testing.T) {
testCases := []struct {
name string
srcBucketPath []string
dstBucketPath []string
bucketToMove string
incompatibleKeyInSrc bool
incompatibleKeyInDst bool
parentSrc bool
parentDst bool
expActErr error
}{
{
name: "happy path",
srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"},
dstBucketPath: []string{"db1", "db2"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: true,
parentDst: false,
expActErr: nil,
},
{
name: "bucketToMove not exist in srcBucket",
srcBucketPath: []string{"sb1", "sb2"},
dstBucketPath: []string{"db1", "db2"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: false,
parentDst: false,
expActErr: errors.ErrBucketNotFound,
},
{
name: "bucketToMove exist in dstBucket",
srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"},
dstBucketPath: []string{"db1", "db2", "sb3ToMove"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: true,
parentDst: true,
expActErr: errors.ErrBucketExists,
},
{
name: "bucketToMove key exist in srcBucket but no subBucket value",
srcBucketPath: []string{"sb1", "sb2"},
dstBucketPath: []string{"db1", "db2"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: true,
incompatibleKeyInDst: false,
parentSrc: true,
parentDst: false,
expActErr: errors.ErrIncompatibleValue,
},
{
name: "bucketToMove key exist in dstBucket but no subBucket value",
srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"},
dstBucketPath: []string{"db1", "db2"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: true,
parentSrc: true,
parentDst: true,
expActErr: errors.ErrIncompatibleValue,
},
{
name: "srcBucket is rootBucket",
srcBucketPath: []string{"", "sb3ToMove"},
dstBucketPath: []string{"db1", "db2"},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: true,
parentDst: false,
expActErr: nil,
},
{
name: "dstBucket is rootBucket",
srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"},
dstBucketPath: []string{""},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: true,
parentDst: false,
expActErr: nil,
},
{
name: "srcBucket is rootBucket and dstBucket is rootBucket",
srcBucketPath: []string{"", "sb3ToMove"},
dstBucketPath: []string{""},
bucketToMove: "sb3ToMove",
incompatibleKeyInSrc: false,
incompatibleKeyInDst: false,
parentSrc: false,
parentDst: false,
expActErr: errors.ErrSameBuckets,
},
}

for _, tc := range testCases {

t.Run(tc.name, func(t *testing.T) {
db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize})

dumpBucketBeforeMoving := filepath.Join(t.TempDir(), "beforeBucket.db")
dumpBucketAfterMoving := filepath.Join(t.TempDir(), "afterBucket.db")

// arrange
if err := db.Update(func(tx *bbolt.Tx) error {
srcBucket := openBuckets(t, tx, tc.incompatibleKeyInSrc, true, false, tc.srcBucketPath...)
dstBucket := openBuckets(t, tx, tc.incompatibleKeyInDst, true, false, tc.dstBucketPath...)

if tc.incompatibleKeyInSrc {
if pErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil {
t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", srcBucket, pErr)
}
}

if tc.incompatibleKeyInDst {
if pErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil {
t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", dstBucket, pErr)
}
}

return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()

// act
if err := db.Update(func(tx *bbolt.Tx) error {
srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...)
dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...)

var bucketToMove *bbolt.Bucket
if srcBucket != nil {
bucketToMove = srcBucket.Bucket([]byte(tc.bucketToMove))
} else {
bucketToMove = tx.Bucket([]byte(tc.bucketToMove))
}

if tc.expActErr == nil && bucketToMove != nil {
if wErr := dumpBucket([]byte(tc.bucketToMove), bucketToMove, dumpBucketBeforeMoving); wErr != nil {
t.Fatalf("error dumping bucket %v to file %v: %v", bucketToMove.String(), dumpBucketBeforeMoving, wErr)
}
}

mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket)
require.ErrorIs(t, mErr, tc.expActErr)

return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()

// skip assertion if failure expected
if tc.expActErr != nil {
return
}

// assert
if err := db.Update(func(tx *bbolt.Tx) error {
var movedBucket *bbolt.Bucket
srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...)

if srcBucket != nil {
if movedBucket = srcBucket.Bucket([]byte(tc.bucketToMove)); movedBucket != nil {
t.Fatalf("expected childBucket %v to be moved from srcBucket %v", tc.bucketToMove, srcBucket)
}
} else {
if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket != nil {
t.Fatalf("expected childBucket %v to be moved from root bucket %v", tc.bucketToMove, "root bucket")
}
}

dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...)
if dstBucket != nil {
if movedBucket = dstBucket.Bucket([]byte(tc.bucketToMove)); movedBucket == nil {
t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, dstBucket)
}
} else {
if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket == nil {
t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, "root bucket")
}
}

wErr := dumpBucket([]byte(tc.bucketToMove), movedBucket, dumpBucketAfterMoving)
if wErr != nil {
t.Fatalf("error dumping bucket %v to file %v", movedBucket.String(), dumpBucketAfterMoving)
}

beforeBucket := readBucketFromFile(t, dumpBucketBeforeMoving)
afterBucket := readBucketFromFile(t, dumpBucketAfterMoving)

if !bytes.Equal(beforeBucket, afterBucket) {
t.Fatalf("bucket's content before moving is different than after moving")
}

return nil
}); err != nil {
t.Fatal(err)
}
db.MustCheck()
})
}
}

func openBuckets(t testing.TB, tx *bbolt.Tx, incompatibleKey bool, init bool, parent bool, paths ...string) *bbolt.Bucket {
t.Helper()

var bk *bbolt.Bucket
var err error

idx := len(paths) - 1
for i, key := range paths {
if len(key) == 0 {
if !init {
break
}
continue
}
if (incompatibleKey && i == idx) || (parent && i == idx) {
continue
}
if bk == nil {
bk, err = tx.CreateBucketIfNotExists([]byte(key))
} else {
bk, err = bk.CreateBucketIfNotExists([]byte(key))
}
if err != nil {
t.Fatalf("error creating bucket %v: %v", key, err)
}
if init {
insertRandKeysValuesBucket(t, bk, rand.Intn(4096))
}
}

return bk
}

func retrieveBucket(t testing.TB, tx *bbolt.Tx, paths ...string) *bbolt.Bucket {
t.Helper()

paths = paths[:len(paths)-1]
var bk *bbolt.Bucket = nil
for _, path := range paths {
// skip root bucket
if path == "" {
continue
}
if bk == nil {
bk = tx.Bucket([]byte(path))
} else {
bk = bk.Bucket([]byte(path))
}
if bk == nil {
t.Fatalf("error retrieving bucket %v within paths %v", path, strings.TrimSuffix(strings.Join(paths, "->"), "->"))
}
}

return bk
}

func readBucketFromFile(t testing.TB, tmpFile string) []byte {
data, err := os.ReadFile(tmpFile)
if err != nil {
t.Fatalf("error reading temp file %v", tmpFile)
}

return data
}

func insertRandKeysValuesBucket(t testing.TB, bk *bbolt.Bucket, n int) {
var min, max = 1, 1024

for i := 0; i < n; i++ {
// generate rand key/value length
keyLength := rand.Intn(max-min) + min
valLength := rand.Intn(max-min) + min

keyData := make([]byte, keyLength)
valData := make([]byte, valLength)

_, err := crand.Read(keyData)
require.NoError(t, err)

_, err = crand.Read(valData)
require.NoError(t, err)

err = bk.Put(keyData, valData)
require.NoError(t, err)
}
}
46 changes: 46 additions & 0 deletions utils_test.go
@@ -0,0 +1,46 @@
package bbolt_test

import (
bolt "go.etcd.io/bbolt"
"go.etcd.io/bbolt/internal/common"
)

// `dumpBucket` dumps all the data, including both key/value data
// and child buckets, from the source bucket into the target db file.
func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string) error {
common.Assert(len(srcBucketName) != 0, "source bucket name can't be empty")
common.Assert(srcBucket != nil, "the source bucket can't be nil")
common.Assert(len(dstFilename) != 0, "the target file path can't be empty")

dstDB, err := bolt.Open(dstFilename, 0600, nil)
if err != nil {
return err
}
// Close the temporary db once the dump transaction commits, so the file
// handle is not leaked across test cases.
defer func() { _ = dstDB.Close() }()

return dstDB.Update(func(tx *bolt.Tx) error {
dstBucket, err := tx.CreateBucket(srcBucketName)
if err != nil {
return err
}
return cloneBucket(srcBucket, dstBucket)
})
}

func cloneBucket(src *bolt.Bucket, dst *bolt.Bucket) error {
return src.ForEach(func(k, v []byte) error {
if v == nil {
srcChild := src.Bucket(k)
dstChild, err := dst.CreateBucket(k)
if err != nil {
return err
}
if err = dstChild.SetSequence(srcChild.Sequence()); err != nil {
return err
}

return cloneBucket(srcChild, dstChild)
}

return dst.Put(k, v)
})
}
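
As a rough sketch of how these helpers combine with the test above (same bbolt_test package, reusing its testing, path/filepath, and bolt imports; the function name snapshotBucket is hypothetical), a bucket can be serialized into a throwaway db file and read back, so two snapshots can be compared with bytes.Equal before and after a move:

// snapshotBucket is a hypothetical helper built on dumpBucket and
// readBucketFromFile: it dumps the named top-level bucket into a temporary
// db file and returns the raw file contents for byte-level comparison.
func snapshotBucket(t testing.TB, db *bolt.DB, name []byte) []byte {
	t.Helper()

	path := filepath.Join(t.TempDir(), "snapshot.db")
	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(name)
		if b == nil {
			t.Fatalf("bucket %q not found", name)
		}
		return dumpBucket(name, b, path)
	}); err != nil {
		t.Fatalf("error dumping bucket %q: %v", name, err)
	}

	return readBucketFromFile(t, path)
}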
