From 3a843b911b3d4aac48e2a61e7778284c664bd155 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Mon, 7 May 2018 06:05:04 -0400 Subject: [PATCH 01/38] storagepacker V2 --- helper/cryptoutil/cryptoutil.go | 14 + helper/cryptoutil/cryptoutil_test.go | 14 + .../{storagepacker.go => storagepacker_v1.go} | 46 +- ...acker_test.go => storagepacker_v1_test.go} | 28 +- helper/storagepacker/storagepacker_v2.go | 613 ++++++++++++++++++ helper/storagepacker/storagepacker_v2_test.go | 190 ++++++ helper/storagepacker/types.pb.go | 132 +++- helper/storagepacker/types.proto | 30 +- helper/strutil/strutil.go | 44 ++ logical/logical_storage.go | 82 +++ vault/identity_store.go | 6 +- vault/identity_store_structs.go | 4 +- vault/identity_store_util.go | 2 +- 13 files changed, 1145 insertions(+), 60 deletions(-) create mode 100644 helper/cryptoutil/cryptoutil.go create mode 100644 helper/cryptoutil/cryptoutil_test.go rename helper/storagepacker/{storagepacker.go => storagepacker_v1.go} (84%) rename helper/storagepacker/{storagepacker_test.go => storagepacker_v1_test.go} (83%) create mode 100644 helper/storagepacker/storagepacker_v2.go create mode 100644 helper/storagepacker/storagepacker_v2_test.go create mode 100644 logical/logical_storage.go diff --git a/helper/cryptoutil/cryptoutil.go b/helper/cryptoutil/cryptoutil.go new file mode 100644 index 0000000000000..20a59459b1e13 --- /dev/null +++ b/helper/cryptoutil/cryptoutil.go @@ -0,0 +1,14 @@ +package cryptoutil + +import "golang.org/x/crypto/blake2b" + +func Blake2b256Hash(key string) ([]byte, error) { + hf, err := blake2b.New256(nil) + if err != nil { + return nil, err + } + + hf.Write([]byte(key)) + + return hf.Sum(nil), nil +} diff --git a/helper/cryptoutil/cryptoutil_test.go b/helper/cryptoutil/cryptoutil_test.go new file mode 100644 index 0000000000000..f08ca397a101b --- /dev/null +++ b/helper/cryptoutil/cryptoutil_test.go @@ -0,0 +1,14 @@ +package cryptoutil + +import "testing" + +func TestBlake2b256Hash(t *testing.T) { + hashVal, err = Blake2b256Hash("sampletext") + if err != nil { + t.Fatal(err) + } + + if string(hashVal) == "" || string(hashVal) == "sampletext" { + t.Fatalf("failed to hash the text") + } +} diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker_v1.go similarity index 84% rename from helper/storagepacker/storagepacker.go rename to helper/storagepacker/storagepacker_v1.go index 67c05b9b1b55e..cf5a6132a977c 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -17,16 +17,16 @@ import ( ) const ( - bucketCount = 256 - StoragePackerBucketsPrefix = "packer/buckets/" + bucketCount = 256 + DefaultStoragePackerBucketsPrefix = "packer/buckets/" ) -// StoragePacker packs the objects into a specific number of buckets by hashing -// its ID and indexing it. Currently this supports only 256 bucket entries and -// hence relies on the first byte of the hash value for indexing. The items -// that gets inserted into the packer should implement StorageBucketItem -// interface. -type StoragePacker struct { +// StoragePackerV1 packs the objects into a specific number of buckets by +// hashing its ID and indexing it. Currently this supports only 256 bucket +// entries and hence relies on the first byte of the hash value for indexing. +// This is only here for backwards compatibility. Use StoragePackerV2 for any +// newer implementations which allows for infinite storage capacity. 
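A minimal sketch of the fixed indexing scheme described in the comment above, assuming the crypto/md5 and strconv imports; bucketPathV1 is an invented name and is not part of the patch:

    // bucketPathV1 sketches how StoragePackerV1 maps an item ID to one of its
    // 256 fixed buckets: the first byte of the MD5 hash of the ID is the
    // bucket index, so every item lives under exactly one fixed storage path.
    // Illustrative helper only, not part of this patch.
    func bucketPathV1(itemID string) string {
        sum := md5.Sum([]byte(itemID))
        return DefaultStoragePackerBucketsPrefix + strconv.Itoa(int(sum[0])) // e.g. "packer/buckets/167"
    }
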
+type StoragePackerV1 struct { view logical.Storage logger log.Logger storageLocks []*locksutil.LockEntry @@ -34,31 +34,31 @@ type StoragePacker struct { } // BucketPath returns the storage entry key for a given bucket key -func (s *StoragePacker) BucketPath(bucketKey string) string { +func (s *StoragePackerV1) BucketPath(bucketKey string) string { return s.viewPrefix + bucketKey } // BucketKeyHash returns the MD5 hash of the bucket storage key in which // the item will be stored. The choice of MD5 is only for hash performance // reasons since its value is not used for any security sensitive operation. -func (s *StoragePacker) BucketKeyHashByItemID(itemID string) string { +func (s *StoragePackerV1) BucketKeyHashByItemID(itemID string) string { return s.BucketKeyHashByKey(s.BucketPath(s.BucketKey(itemID))) } // BucketKeyHashByKey returns the MD5 hash of the bucket storage key -func (s *StoragePacker) BucketKeyHashByKey(bucketKey string) string { +func (s *StoragePackerV1) BucketKeyHashByKey(bucketKey string) string { hf := md5.New() hf.Write([]byte(bucketKey)) return hex.EncodeToString(hf.Sum(nil)) } // View returns the storage view configured to be used by the packer -func (s *StoragePacker) View() logical.Storage { +func (s *StoragePackerV1) View() logical.Storage { return s.view } // Get returns a bucket for a given key -func (s *StoragePacker) GetBucket(key string) (*Bucket, error) { +func (s *StoragePackerV1) GetBucket(key string) (*Bucket, error) { if key == "" { return nil, fmt.Errorf("missing bucket key") } @@ -129,20 +129,20 @@ func (s *Bucket) upsert(item *Item) error { } // BucketIndex returns the bucket key index for a given storage key -func (s *StoragePacker) BucketIndex(key string) uint8 { +func (s *StoragePackerV1) BucketIndex(key string) uint8 { hf := md5.New() hf.Write([]byte(key)) return uint8(hf.Sum(nil)[0]) } // BucketKey returns the bucket key for a given item ID -func (s *StoragePacker) BucketKey(itemID string) string { +func (s *StoragePackerV1) BucketKey(itemID string) string { return strconv.Itoa(int(s.BucketIndex(itemID))) } // DeleteItem removes the storage entry which the given key refers to from its // corresponding bucket. -func (s *StoragePacker) DeleteItem(itemID string) error { +func (s *StoragePackerV1) DeleteItem(itemID string) error { if itemID == "" { return fmt.Errorf("empty item ID") @@ -202,7 +202,7 @@ func (s *StoragePacker) DeleteItem(itemID string) error { } // Put stores a packed bucket entry -func (s *StoragePacker) PutBucket(bucket *Bucket) error { +func (s *StoragePackerV1) PutBucket(bucket *Bucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -241,7 +241,7 @@ func (s *StoragePacker) PutBucket(bucket *Bucket) error { // GetItem fetches the storage entry for a given key from its corresponding // bucket. 
-func (s *StoragePacker) GetItem(itemID string) (*Item, error) { +func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { if itemID == "" { return nil, fmt.Errorf("empty item ID") } @@ -269,7 +269,7 @@ func (s *StoragePacker) GetItem(itemID string) (*Item, error) { } // PutItem stores a storage entry in its corresponding bucket -func (s *StoragePacker) PutItem(item *Item) error { +func (s *StoragePackerV1) PutItem(item *Item) error { if item == nil { return fmt.Errorf("nil item") } @@ -329,14 +329,14 @@ func (s *StoragePacker) PutItem(item *Item) error { return s.PutBucket(bucket) } -// NewStoragePacker creates a new storage packer for a given view -func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePacker, error) { +// NewStoragePackerV1 creates a new storage packer for a given view +func NewStoragePackerV1(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePackerV1, error) { if view == nil { return nil, fmt.Errorf("nil view") } if viewPrefix == "" { - viewPrefix = StoragePackerBucketsPrefix + viewPrefix = DefaultStoragePackerBucketsPrefix } if !strings.HasSuffix(viewPrefix, "/") { @@ -344,7 +344,7 @@ func NewStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string } // Create a new packer object for the given view - packer := &StoragePacker{ + packer := &StoragePackerV1{ view: view, viewPrefix: viewPrefix, logger: logger.Named("storagepacker"), diff --git a/helper/storagepacker/storagepacker_test.go b/helper/storagepacker/storagepacker_v1_test.go similarity index 83% rename from helper/storagepacker/storagepacker_test.go rename to helper/storagepacker/storagepacker_v1_test.go index e7e3d5aa9cac5..560c729a4dfaf 100644 --- a/helper/storagepacker/storagepacker_test.go +++ b/helper/storagepacker/storagepacker_v1_test.go @@ -1,9 +1,9 @@ package storagepacker import ( + "reflect" "testing" - "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" @@ -11,8 +11,12 @@ import ( "github.com/hashicorp/vault/logical" ) -func BenchmarkStoragePacker(b *testing.B) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") +func BenchmarkStoragePackerV1(b *testing.B) { + storagePacker, err := NewStoragePackerV1( + &logical.InmemStorage{}, + log.New(&log.LoggerOptions{Name: "storagepackertest"}), + "", + ) if err != nil { b.Fatal(err) } @@ -60,8 +64,12 @@ func BenchmarkStoragePacker(b *testing.B) { } } -func TestStoragePacker(t *testing.T) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") +func TestStoragePackerV1(t *testing.T) { + storagePacker, err := NewStoragePackerV1( + &logical.InmemStorage{}, + log.New(&log.LoggerOptions{Name: "storagepackertest"}), + "", + ) if err != nil { t.Fatal(err) } @@ -106,8 +114,12 @@ func TestStoragePacker(t *testing.T) { } } -func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { - storagePacker, err := NewStoragePacker(&logical.InmemStorage{}, log.New(&log.LoggerOptions{Name: "storagepackertest"}), "") +func TestStoragePackerV1_SerializeDeserializeComplexItem_Version1(t *testing.T) { + storagePacker, err := NewStoragePackerV1( + &logical.InmemStorage{}, + log.New(&log.LoggerOptions{Name: "storagepackertest"}), + "", + ) if err != nil { t.Fatal(err) } @@ -166,7 +178,7 @@ func TestStoragePacker_SerializeDeserializeComplexItem(t *testing.T) { 
t.Fatal(err) } - if !proto.Equal(&itemDecoded, entity) { + if !reflect.DeepEqual(&itemDecoded, entity) { t.Fatalf("bad: expected: %#v\nactual: %#v\n", entity, itemDecoded) } } diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go new file mode 100644 index 0000000000000..01bc9eb37573f --- /dev/null +++ b/helper/storagepacker/storagepacker_v2.go @@ -0,0 +1,613 @@ +package storagepacker + +import ( + "context" + "fmt" + "math" + "strconv" + "strings" + "sync" + + radix "github.com/armon/go-radix" + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/vault/helper/strutil" + + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/cryptoutil" + "github.com/hashicorp/vault/logical" +) + +const ( + defaultBucketBaseCount = 256 + defaultBucketShardCount = 16 + // Larger size of the bucket size adversely affects the performance of the + // storage packer. Also, some of the backends impose a maximum size limit + // on the objects that gets persisted. For example, Consul imposes 512KB + // and DynamoDB imposes 400KB. Going forward, if there exists storage + // backends that has more constrained limits, this will have to become more + // flexible. For now, 380KB seems like a decent bargain. + defaultBucketMaxSize = 380 * 1024 +) + +type Config struct { + // View is the storage to be used by all the buckets + View logical.Storage + + // ViewPrefix is the prefix to be used for the buckets in the view + ViewPrefix string + + // Logger for output + Logger log.Logger + + // BucketBaseCount is the number of buckets to create at the base level + BucketBaseCount int + + // BucketShardCount is the number of sub-buckets a bucket gets sharded into + // when it reaches the maximum threshold + BucketShardCount int + + // BucketMaxSize (in bytes) is the maximum allowed size per bucket. When + // the size of the bucket reaches a threshold relative to this limit, it + // gets sharded into the configured number of pieces incrementally. + BucketMaxSize int64 +} + +// StoragePackerV2 packs many items into abstractions called buckets. The goal +// is to employ a reduced number of storage entries for a relatively huge +// number of items. This is the second version of the utility which supports +// indefinitely expanding the capacity of the storage by sharding the buckets +// when they exceed the imposed limit. 
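As a rough usage sketch of the sharding behavior described above: packOne is an invented helper, not part of the patch, and the bucket keys shown in its comments are examples rather than exact values; the imports are the ones the tests in this package already use.

    // packOne shows how a caller might wire up the V2 packer and store one
    // protobuf message under an ID. Illustrative only.
    func packOne(id string, msg proto.Message) (string, error) {
        sp, err := NewStoragePackerV2(&Config{
            View:   &logical.InmemStorage{},
            Logger: log.New(&log.LoggerOptions{Name: "storagepacker"}),
            // Bucket base/shard counts and max size fall back to the defaults
            // defined above when left at zero.
        })
        if err != nil {
            return "", err
        }

        wrapped, err := ptypes.MarshalAny(msg)
        if err != nil {
            return "", err
        }

        // PutItem reports which bucket the item landed in: a base bucket such
        // as "packer/buckets/a7" at first, or a child such as
        // "packer/buckets/a7/3" once that base bucket has sharded.
        return sp.PutItem(&Item{ID: id, Message: wrapped})
    }
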
+type StoragePackerV2 struct { + config *Config + bucketsCache *radix.Tree +} + +// LockedBucket embeds a bucket and its corresponding lock to ensure thread +// safety +type LockedBucket struct { + *BucketV2 + lock sync.RWMutex +} + +// NewStoragePackerV2 creates a new storage packer for a given view +func NewStoragePackerV2(config *Config) (*StoragePackerV2, error) { + if config.View == nil { + return nil, fmt.Errorf("nil view") + } + + if config.ViewPrefix == "" { + config.ViewPrefix = DefaultStoragePackerBucketsPrefix + } + + if !strings.HasSuffix(config.ViewPrefix, "/") { + config.ViewPrefix = config.ViewPrefix + "/" + } + + if config.BucketBaseCount == 0 { + config.BucketBaseCount = defaultBucketBaseCount + } + + if config.BucketShardCount == 0 { + config.BucketShardCount = defaultBucketShardCount + } + + if config.BucketMaxSize == 0 { + config.BucketMaxSize = defaultBucketMaxSize + } + + if !isPowerOfTwo(config.BucketBaseCount) { + return nil, fmt.Errorf("bucket base count of %d is not a power of two", config.BucketBaseCount) + } + + if !isPowerOfTwo(config.BucketShardCount) { + return nil, fmt.Errorf("bucket shard count of %d is not a power of two", config.BucketShardCount) + } + + if config.BucketShardCount < 2 { + return nil, fmt.Errorf("bucket shard count should at least be 2") + } + + // Create a new packer object for the given view + packer := &StoragePackerV2{ + config: config, + bucketsCache: radix.New(), + } + + return packer, nil +} + +// Clone creates a replica of the bucket +func (b *BucketV2) Clone() (*BucketV2, error) { + if b == nil { + return nil, fmt.Errorf("nil bucket") + } + + marshaledBucket, err := proto.Marshal(b) + if err != nil { + return nil, fmt.Errorf("failed to marshal bucket: %v", err) + } + + var clonedBucket BucketV2 + err = proto.Unmarshal(marshaledBucket, &clonedBucket) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal bucket: %v", err) + } + + return &clonedBucket, nil +} + +// Get reads a bucket from the storage +func (s *StoragePackerV2) GetBucket(key string) (*LockedBucket, error) { + if key == "" { + return nil, fmt.Errorf("missing bucket key") + } + + raw, exists := s.bucketsCache.Get(key) + if exists { + return raw.(*LockedBucket), nil + } + + // Read from the underlying view + entry, err := s.config.View.Get(context.Background(), key) + if err != nil { + return nil, errwrap.Wrapf("failed to read bucket: {{err}}", err) + } + if entry == nil { + return nil, nil + } + + var bucket BucketV2 + err = proto.Unmarshal(entry.Value, &bucket) + if err != nil { + return nil, errwrap.Wrapf("failed to decode bucket: {{err}}", err) + } + + // Serializing and deserializing a proto message with empty map translates + // to a nil. Ensure that the required fields are initialized properly. 
+ if bucket.Buckets == nil { + bucket.Buckets = make(map[string]*BucketV2) + } + if bucket.Items == nil { + bucket.Items = make(map[string]*Item) + } + + // Update the unencrypted size of the bucket + bucket.Size = int64(len(entry.Value)) + + lb := &LockedBucket{ + BucketV2: &bucket, + } + s.bucketsCache.Insert(bucket.Key, lb) + + return lb, nil +} + +// Put stores a bucket in storage +func (s *StoragePackerV2) PutBucket(bucket *LockedBucket) error { + if bucket == nil { + return fmt.Errorf("nil bucket entry") + } + + if bucket.Key == "" { + return fmt.Errorf("missing bucket key") + } + + if !strings.HasPrefix(bucket.Key, s.config.ViewPrefix) { + return fmt.Errorf("bucket entry key should have %q prefix", s.config.ViewPrefix) + } + + marshaledBucket, err := proto.Marshal(bucket.BucketV2) + if err != nil { + return err + } + + err = s.config.View.Put(context.Background(), &logical.StorageEntry{ + Key: bucket.Key, + Value: marshaledBucket, + }) + if err != nil { + return err + } + + bucket.Size = int64(len(marshaledBucket)) + + s.bucketsCache.Insert(bucket.Key, bucket) + + return nil +} + +// putItem is a recursive function that finds the appropriate bucket +// to store the item based on the storage space available in the buckets. +func (s *StoragePackerV2) putItem(bucket *LockedBucket, item *Item, depth int) (string, error) { + // Bucket will be nil for the first time when its not known which base + // level bucket the item belongs to. + if bucket == nil { + // Enforce zero depth + depth = 0 + + // Compute the index of the base bucket + baseIndex, err := s.baseBucketIndex(item.ID) + if err != nil { + return "", err + } + + // Prepend the index with the prefix + baseKey := s.config.ViewPrefix + baseIndex + + // Check if the base bucket exists + bucket, err = s.GetBucket(baseKey) + if err != nil { + return "", err + } + + // If the base bucket does not exist, create one + if bucket == nil { + bucket = s.newBucket(baseKey) + } + } + + // Compute the shard index to which the item belongs + shardIndex, err := s.shardBucketIndex(item.ID, depth) + if err != nil { + return "", errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) + } + shardKey := bucket.Key + "/" + shardIndex + + // Acquire lock on the bucket + bucket.lock.Lock() + + if bucket.Sharded { + // If the bucket is already sharded out, release the lock and continue + // insertion at the next level. + bucket.lock.Unlock() + shardedBucket, err := s.GetBucket(shardKey) + if err != nil { + return "", err + } + if shardedBucket == nil { + shardedBucket = s.newBucket(shardKey) + } + return s.putItem(shardedBucket, item, depth+1) + } + + // From this point on, the item may get inserted either in the current + // bucket or at its next level. In both cases, there will be a need to + // persist the current bucket. Hence the lock on the current bucket is + // deferred. + defer bucket.lock.Unlock() + + // Check if a bucket shard is already present for the shard index. If not, + // create one. + bucketShard, ok := bucket.Buckets[shardIndex] + if !ok { + bucketShard = s.newBucket(shardKey).BucketV2 + bucket.Buckets[shardIndex] = bucketShard + } + + // Check if the insertion of the item makes the bucket size exceed the + // limit. + exceedsLimit, err := s.bucketExceedsSizeLimit(bucket, item) + if err != nil { + return "", err + } + + // If the bucket size after addition of the item doesn't exceed the limit, + // insert the item persist the bucket. 
+ if !exceedsLimit { + bucketShard.Items[item.ID] = item + return bucket.Key, s.PutBucket(bucket) + } + + // The bucket size after addition of the item exceeds the size limit. Split + // the bucket into shards. + err = s.splitBucket(bucket, depth) + if err != nil { + return "", err + } + + shardedBucket, err := s.GetBucket(bucketShard.Key) + if err != nil { + return "", err + } + + bucketKey, err := s.putItem(shardedBucket, item, depth+1) + if err != nil { + return "", err + } + + return bucketKey, s.PutBucket(bucket) +} + +// getItem is a recursive function that fetches the given item ID in +// the bucket hierarchy +func (s *StoragePackerV2) getItem(bucket *LockedBucket, itemID string, depth int) (*Item, error) { + if bucket == nil { + // Enforce zero depth + depth = 0 + + baseIndex, err := s.baseBucketIndex(itemID) + if err != nil { + return nil, err + } + + bucket, err = s.GetBucket(s.config.ViewPrefix + baseIndex) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) + } + } + + if bucket == nil { + return nil, nil + } + + shardIndex, err := s.shardBucketIndex(itemID, depth) + if err != nil { + return nil, errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) + } + + shardKey := bucket.Key + "/" + shardIndex + + bucket.lock.RLock() + + if bucket.Sharded { + bucket.lock.RUnlock() + shardedBucket, err := s.GetBucket(shardKey) + if err != nil { + return nil, err + } + if shardedBucket == nil { + return nil, nil + } + return s.getItem(shardedBucket, itemID, depth+1) + } + + defer bucket.lock.RUnlock() + + bucketShard, ok := bucket.Buckets[shardIndex] + if !ok { + return nil, nil + } + + if bucketShard == nil { + return nil, nil + } + + return bucketShard.Items[itemID], nil +} + +// deleteItem is a recursive function that finds the bucket holding +// the item and removes the item from it +func (s *StoragePackerV2) deleteItem(bucket *LockedBucket, itemID string, depth int) error { + if bucket == nil { + // Enforce zero depth + depth = 0 + + baseIndex, err := s.baseBucketIndex(itemID) + if err != nil { + return err + } + + bucket, err = s.GetBucket(s.config.ViewPrefix + baseIndex) + if err != nil { + return errwrap.Wrapf("failed to read packed storage item: {{err}}", err) + } + } + + if bucket == nil { + return nil + } + + shardIndex, err := s.shardBucketIndex(itemID, depth) + if err != nil { + return errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) + } + + shardKey := bucket.Key + "/" + shardIndex + + bucket.lock.Lock() + + if bucket.Sharded { + bucket.lock.Unlock() + shardedBucket, err := s.GetBucket(shardKey) + if err != nil { + return err + } + if shardedBucket == nil { + return nil + } + return s.deleteItem(shardedBucket, itemID, depth+1) + } + + defer bucket.lock.Unlock() + + bucketShard, ok := bucket.Buckets[shardIndex] + if !ok { + return nil + } + + if bucketShard == nil { + return nil + } + + delete(bucketShard.Items, itemID) + + return s.PutBucket(bucket) +} + +// GetItem fetches the item using the given item identifier +func (s *StoragePackerV2) GetItem(itemID string) (*Item, error) { + if itemID == "" { + return nil, fmt.Errorf("empty item ID") + } + + return s.getItem(nil, itemID, 0) +} + +// PutItem persists the given item +func (s *StoragePackerV2) PutItem(item *Item) (string, error) { + if item == nil { + return "", fmt.Errorf("nil item") + } + + if item.ID == "" { + return "", fmt.Errorf("missing ID in item") + } + + bucketKey, err := s.putItem(nil, item, 0) + if err != nil { + return "", 
err + } + + return bucketKey, nil +} + +// DeleteItem removes the item using the given item identifier +func (s *StoragePackerV2) DeleteItem(itemID string) error { + if itemID == "" { + return fmt.Errorf("empty item ID") + } + + return s.deleteItem(nil, itemID, 0) +} + +// bucketExceedsSizeLimit computes if the given bucket is exceeding the +// configured size limit on the storage packer +func (s *StoragePackerV2) bucketExceedsSizeLimit(bucket *LockedBucket, item *Item) (bool, error) { + marshaledItem, err := proto.Marshal(item) + if err != nil { + return false, fmt.Errorf("failed to marshal item: %v", err) + } + + expectedBucketSize := bucket.Size + int64(len(marshaledItem)) + + // The objects that leave storage packer to get persisted get inflated due + // to extra bits coming off of encryption. So, we consider the bucket to be + // full much earlier to compensate for the encryption overhead. Testing + // with the threshold of 70% of the max size resulted in object sizes + // coming dangerously close to the actual limit. Hence, setting 60% as the + // cut-off value. This is purely a heuristic threshold. + max := math.Ceil((float64(s.config.BucketMaxSize) * float64(60)) / float64(100)) + + return float64(expectedBucketSize) > max, nil +} + +func (s *StoragePackerV2) splitBucket(bucket *LockedBucket, depth int) error { + for _, shard := range bucket.Buckets { + for itemID, item := range shard.Items { + if shard.Buckets == nil { + shard.Buckets = make(map[string]*BucketV2) + } + subShardIndex, err := s.shardBucketIndex(itemID, depth+1) + if err != nil { + return err + } + subShard, ok := shard.Buckets[subShardIndex] + if !ok { + subShardKey := shard.Key + "/" + subShardIndex + subShard = s.newBucket(subShardKey).BucketV2 + shard.Buckets[subShardIndex] = subShard + } + subShard.Items[itemID] = item + } + + shard.Items = nil + err := s.PutBucket(&LockedBucket{BucketV2: shard}) + if err != nil { + return err + } + } + bucket.Buckets = nil + bucket.Sharded = true + return nil +} + +// baseBucketIndex returns the index of the base bucket to which the +// given item belongs +func (s *StoragePackerV2) baseBucketIndex(itemID string) (string, error) { + // Hash the item ID + hashVal, err := cryptoutil.Blake2b256Hash(itemID) + if err != nil { + return "", err + } + + // Extract the index value of the base bucket from the hash of the item ID + return strutil.BitMaskedIndexHex(hashVal, bitsNeeded(s.config.BucketBaseCount)) +} + +// shardBucketIndex returns the index of the bucket shard to which the given +// item belongs at a particular depth. +func (s *StoragePackerV2) shardBucketIndex(itemID string, depth int) (string, error) { + // Hash the item ID + hashVal, err := cryptoutil.Blake2b256Hash(itemID) + if err != nil { + return "", err + } + + // Compute the bits required to enumerate base buckets + shardsBitCount := bitsNeeded(s.config.BucketShardCount) + + // Compute the bits that are already consumed by the base bucket and the + // shards at previous levels. 
+ ignoreBits := bitsNeeded(s.config.BucketBaseCount) + depth*shardsBitCount + + // Extract the index value of the bucket shard from the hash of the item ID + return strutil.BitMaskedIndexHex(hashVal[ignoreBits:], shardsBitCount) +} + +// bitsNeeded returns the minimum number of bits required to enumerate the +// natural numbers below the given value +func bitsNeeded(value int) int { + return int(math.Ceil(math.Log2(float64(value)))) +} + +func (s *StoragePackerV2) newBucket(key string) *LockedBucket { + return &LockedBucket{ + BucketV2: &BucketV2{ + Key: key, + Buckets: make(map[string]*BucketV2), + Items: make(map[string]*Item), + }, + } +} + +func isPowerOfTwo(val int) bool { + return val != 0 && (val&(val-1) == 0) +} + +type BucketWalkFunc func(item *Item) error + +// BucketWalk is a pre-order traversal of the bucket hierarchy starting from +// the bucket corresponding to the given key. The function fn will be called on +// all the items in the hierarchy. +func (s *StoragePackerV2) BucketWalk(key string, fn BucketWalkFunc) error { + bucket, err := s.GetBucket(key) + if err != nil { + return err + } + if bucket == nil { + return nil + } + + if !bucket.Sharded { + for _, b := range bucket.Buckets { + for _, item := range b.Items { + err := fn(item) + if err != nil { + return err + } + } + } + return nil + } + + for i := 0; i < s.config.BucketShardCount; i++ { + shardKey := bucket.Key + "/" + strconv.FormatInt(int64(i), 16) + err = s.BucketWalk(shardKey, fn) + if err != nil { + return err + } + } + + return nil +} diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.go new file mode 100644 index 0000000000000..6ecad91d4a46e --- /dev/null +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -0,0 +1,190 @@ +package storagepacker + +import ( + "fmt" + "io/ioutil" + "os" + "strconv" + "testing" + + "github.com/golang/protobuf/ptypes" + "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/logical" + + log "github.com/hashicorp/go-hclog" +) + +const ( + testIterationCount = 30 + testBucketBaseCount = defaultBucketBaseCount + testBucketShardCount = defaultBucketShardCount + testBucketMaxSize = defaultBucketMaxSize +) + +func TestStoragePackerV2_Inmem(t *testing.T) { + sp, err := NewStoragePackerV2(&Config{ + BucketBaseCount: testBucketBaseCount, + BucketShardCount: testBucketShardCount, + BucketMaxSize: testBucketMaxSize, + View: &logical.InmemStorage{}, + Logger: logging.NewVaultLogger(log.Trace), + }) + if err != nil { + t.Fatal(err) + } + + entity := &identity.Entity{ + Metadata: map[string]string{ + "samplekey1": "samplevalue1", + "samplekey2": "samplevalue2", + "samplekey3": "samplevalue3", + "samplekey4": "samplevalue4", + "samplekey5": "samplevalue5", + }, + } + testPutItem(t, sp, entity) + testGetItem(t, sp, false) + testDeleteItem(t, sp) + testGetItem(t, sp, true) +} + +func TestStoragePackerV2_File(t *testing.T) { + filePath, err := ioutil.TempDir("", "vault") + if err != nil { + t.Fatalf("err: %s", err) + } + //fmt.Printf("filePath: %q\n", filePath) + defer os.RemoveAll(filePath) + + logger := logging.NewVaultLogger(log.Trace) + + config := map[string]string{ + "path": filePath, + } + + storage, err := logical.NewLogicalStorage(logical.LogicalTypeFile, config, logger) + if err != nil { + t.Fatal(err) + } + + sp, err := NewStoragePackerV2(&Config{ + BucketBaseCount: testBucketBaseCount, + BucketShardCount: testBucketShardCount, + BucketMaxSize: testBucketMaxSize, + 
View: storage, + Logger: logger, + }) + if err != nil { + t.Fatal(err) + } + + entity := &identity.Entity{ + Metadata: map[string]string{ + "samplekey1": "samplevalue1", + "samplekey2": "samplevalue2", + "samplekey3": "samplevalue3", + "samplekey4": "samplevalue4", + "samplekey5": "samplevalue5", + }, + } + + testPutItem(t, sp, entity) + testGetItem(t, sp, false) + testDeleteItem(t, sp) + testGetItem(t, sp, true) +} + +func TestStoragePackerV2_isPowerOfTwo(t *testing.T) { + powersOfTwo := []int{1, 2, 4, 1024, 4096} + notPowersOfTwo := []int{0, 3, 5, 1000, 1023, 4095, 4097, 10000} + for _, val := range powersOfTwo { + if !isPowerOfTwo(val) { + t.Fatalf("%d is a power of two", val) + } + } + for _, val := range notPowersOfTwo { + if isPowerOfTwo(val) { + t.Fatalf("%d is not a power of two", val) + } + } +} + +func testPutItem(t *testing.T, sp *StoragePackerV2, entity *identity.Entity) { + t.Helper() + for i := 1; i <= testIterationCount; i++ { + if i%500 == 0 { + fmt.Printf("put item iteration: %d\n", i) + } + id := strconv.Itoa(i) + entity.ID = id + + marshaledMessage, err := ptypes.MarshalAny(entity) + if err != nil { + t.Fatal(err) + } + + item := &Item{ + ID: id, + Message: marshaledMessage, + } + if err != nil { + t.Fatal(err) + } + + _, err = sp.PutItem(item) + if err != nil { + t.Fatal(err) + } + } +} + +func testGetItem(t *testing.T, sp *StoragePackerV2, expectNil bool) { + t.Helper() + for i := 1; i <= testIterationCount; i++ { + if i%500 == 0 { + fmt.Printf("get item iteration: %d\n", i) + } + id := strconv.Itoa(i) + + itemFetched, err := sp.GetItem(id) + if err != nil { + t.Fatal(err) + } + + switch expectNil { + case itemFetched == nil: + return + default: + t.Fatalf("expected nil for item %q\n", id) + } + + if itemFetched == nil { + t.Fatalf("failed to read the inserted item %q", id) + } + + var fetchedMessage identity.Entity + err = ptypes.UnmarshalAny(itemFetched.Message, &fetchedMessage) + if err != nil { + t.Fatal(err) + } + + if fetchedMessage.ID != id { + t.Fatalf("failed to fetch item ID: %q\n", id) + } + } +} + +func testDeleteItem(t *testing.T, sp *StoragePackerV2) { + t.Helper() + for i := 1; i <= testIterationCount; i++ { + if i%500 == 0 { + fmt.Printf("delete item iteration: %d\n", i) + } + id := strconv.Itoa(i) + err := sp.DeleteItem(id) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index e5cc30d2cb0d1..c6b4e0db82b7b 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -19,8 +19,11 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +// Item represents a entry that gets inserted into the storage packer type Item struct { - ID string `sentinel:"" protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // ID is the UUID to identify the item + ID string `sentinel:"" protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // message is the contents of the item Message *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -31,7 +34,7 @@ func (m *Item) Reset() { *m = Item{} } func (m *Item) String() string { return proto.CompactTextString(m) } func (*Item) ProtoMessage() {} func (*Item) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a65077c1fb226c53, []int{0} + return fileDescriptor_types_062eae6dce2c607c, []int{0} } func (m *Item) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Item.Unmarshal(m, b) @@ -65,6 +68,87 @@ func (m *Item) GetMessage() *any.Any { return nil } +// BucketV2 is a construct to hold multiple items within itself. This +// abstraction contains multiple buckets of the same kind within itself and +// shares amont them the items that get inserted. When the bucket as a whole +// gets too big to hold more items, the contained buckets gets pushed out only +// to become independent buckets. Hence, this can grow infinitely in terms of +// storage space for items that get inserted. +type BucketV2 struct { + // Key is the storage path where the bucket gets stored + Key string `sentinel:"" protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + // Items holds the items contained within this bucket + Items map[string]*Item `sentinel:"" protobuf:"bytes,2,rep,name=items" json:"items,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Buckets are the buckets contained within this bucket + Buckets map[string]*BucketV2 `sentinel:"" protobuf:"bytes,3,rep,name=buckets" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Sharded indicates if the contained buckets are pushed out or not + Sharded bool `sentinel:"" protobuf:"varint,4,opt,name=sharded" json:"sharded,omitempty"` + // Size of this bucket in number of bytes + Size int64 `sentinel:"" protobuf:"varint,5,opt,name=size" json:"size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BucketV2) Reset() { *m = BucketV2{} } +func (m *BucketV2) String() string { return proto.CompactTextString(m) } +func (*BucketV2) ProtoMessage() {} +func (*BucketV2) Descriptor() ([]byte, []int) { + return fileDescriptor_types_062eae6dce2c607c, []int{1} +} +func (m *BucketV2) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BucketV2.Unmarshal(m, b) +} +func (m *BucketV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BucketV2.Marshal(b, m, deterministic) +} +func (dst *BucketV2) XXX_Merge(src proto.Message) { + xxx_messageInfo_BucketV2.Merge(dst, src) +} +func (m *BucketV2) XXX_Size() int { + return xxx_messageInfo_BucketV2.Size(m) +} +func (m *BucketV2) XXX_DiscardUnknown() { + xxx_messageInfo_BucketV2.DiscardUnknown(m) +} + +var xxx_messageInfo_BucketV2 proto.InternalMessageInfo + +func (m *BucketV2) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *BucketV2) GetItems() map[string]*Item { + if m != nil { + return m.Items + } + return nil +} 
+ +func (m *BucketV2) GetBuckets() map[string]*BucketV2 { + if m != nil { + return m.Buckets + } + return nil +} + +func (m *BucketV2) GetSharded() bool { + if m != nil { + return m.Sharded + } + return false +} + +func (m *BucketV2) GetSize() int64 { + if m != nil { + return m.Size + } + return 0 +} + type Bucket struct { Key string `sentinel:"" protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` @@ -77,7 +161,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_types_a65077c1fb226c53, []int{1} + return fileDescriptor_types_062eae6dce2c607c, []int{2} } func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) @@ -113,23 +197,33 @@ func (m *Bucket) GetItems() []*Item { func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") + proto.RegisterType((*BucketV2)(nil), "storagepacker.BucketV2") + proto.RegisterMapType((map[string]*BucketV2)(nil), "storagepacker.BucketV2.BucketsEntry") + proto.RegisterMapType((map[string]*Item)(nil), "storagepacker.BucketV2.ItemsEntry") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") } -func init() { proto.RegisterFile("types.proto", fileDescriptor_types_a65077c1fb226c53) } - -var fileDescriptor_types_a65077c1fb226c53 = []byte{ - // 181 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, - 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x2e, 0xc9, 0x2f, 0x4a, 0x4c, 0x4f, - 0x2d, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x92, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, - 0x4b, 0x26, 0x95, 0xa6, 0xe9, 0x27, 0xe6, 0x55, 0x42, 0x54, 0x2a, 0xb9, 0x71, 0xb1, 0x78, 0x96, - 0xa4, 0xe6, 0x0a, 0xf1, 0x71, 0x31, 0x65, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, - 0x65, 0xa6, 0x08, 0xe9, 0x71, 0xb1, 0xe7, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x4a, 0x30, 0x29, - 0x30, 0x6a, 0x70, 0x1b, 0x89, 0xe8, 0x41, 0x0c, 0xd1, 0x83, 0x19, 0xa2, 0xe7, 0x98, 0x57, 0x19, - 0x04, 0x53, 0xa4, 0xe4, 0xca, 0xc5, 0xe6, 0x54, 0x9a, 0x9c, 0x9d, 0x5a, 0x22, 0x24, 0xc0, 0xc5, - 0x9c, 0x9d, 0x5a, 0x09, 0x35, 0x0a, 0xc4, 0x14, 0xd2, 0xe4, 0x62, 0xcd, 0x2c, 0x49, 0xcd, 0x2d, - 0x96, 0x60, 0x52, 0x60, 0xd6, 0xe0, 0x36, 0x12, 0xd6, 0x43, 0x71, 0x9d, 0x1e, 0xc8, 0xfe, 0x20, - 0x88, 0x8a, 0x24, 0x36, 0xb0, 0xe9, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x01, 0x30, 0x77, - 0x9a, 0xce, 0x00, 0x00, 0x00, +func init() { proto.RegisterFile("types.proto", fileDescriptor_types_062eae6dce2c607c) } + +var fileDescriptor_types_062eae6dce2c607c = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xcf, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0x49, 0xba, 0x5f, 0xbe, 0xa9, 0x48, 0x14, 0x8c, 0x3b, 0x95, 0xe2, 0xa1, 0x1e, 0xcc, + 0x60, 0x5e, 0x86, 0x07, 0x41, 0x61, 0x82, 0x07, 0x2f, 0x11, 0xbc, 0x67, 0xeb, 0xb3, 0x96, 0xfd, + 0xe8, 0x68, 0x52, 0xa1, 0xfe, 0xc5, 0xfe, 0x19, 0xd2, 0x64, 0xc1, 0x4e, 0xba, 0xdb, 0x6b, 0xf3, + 0xfd, 0x7c, 0xf9, 0xe4, 0x05, 0x86, 0xa6, 0xda, 0xa2, 0x16, 0xdb, 0x22, 0x37, 0x39, 0x3b, 0xd1, + 0x26, 0x2f, 0x54, 0x8a, 0x5b, 0xb5, 0x58, 0x62, 0x31, 0xba, 0x4a, 0xf3, 0x3c, 0x5d, 0xe1, 0xd8, + 0x1e, 0xce, 0xcb, 0x8f, 0xb1, 0xda, 0x54, 0x2e, 0x19, 0x3d, 0x43, 0xe7, 0xc5, 0xe0, 0x9a, 0x9d, + 0x02, 0xcd, 0x12, 0x4e, 
0x42, 0x12, 0x1f, 0x49, 0x9a, 0x25, 0x4c, 0x40, 0x7f, 0x8d, 0x5a, 0xab, + 0x14, 0x39, 0x0d, 0x49, 0x3c, 0x9c, 0x5c, 0x08, 0x57, 0x22, 0x7c, 0x89, 0x78, 0xdc, 0x54, 0xd2, + 0x87, 0xa2, 0x1f, 0x0a, 0x83, 0xa7, 0x72, 0xb1, 0x44, 0xf3, 0x3e, 0x61, 0x67, 0x10, 0x2c, 0xb1, + 0xda, 0xb5, 0xd5, 0x23, 0x9b, 0x42, 0x37, 0x33, 0xb8, 0xd6, 0x9c, 0x86, 0x41, 0x3c, 0x9c, 0x44, + 0x62, 0x4f, 0x50, 0x78, 0x52, 0xd4, 0x2e, 0x7a, 0xb6, 0x31, 0x45, 0x25, 0x1d, 0xc0, 0x1e, 0xa0, + 0x3f, 0xb7, 0xa7, 0x9a, 0x07, 0x96, 0xbd, 0x3e, 0xc4, 0xba, 0x61, 0x47, 0x7b, 0x88, 0x71, 0xe8, + 0xeb, 0x4f, 0x55, 0x24, 0x98, 0xf0, 0x4e, 0x48, 0xe2, 0x81, 0xf4, 0x9f, 0x8c, 0x41, 0x47, 0x67, + 0xdf, 0xc8, 0xbb, 0x21, 0x89, 0x03, 0x69, 0xe7, 0xd1, 0x2b, 0xc0, 0x9f, 0x42, 0xcb, 0x3d, 0x6e, + 0xa0, 0xfb, 0xa5, 0x56, 0xa5, 0x5f, 0xca, 0xf9, 0x3f, 0x97, 0x9a, 0x95, 0x2e, 0x71, 0x4f, 0xa7, + 0x64, 0xf4, 0x06, 0xc7, 0x4d, 0xab, 0x96, 0xc2, 0xdb, 0xfd, 0xc2, 0xcb, 0x03, 0x97, 0x6b, 0x94, + 0x46, 0x33, 0xe8, 0xb9, 0xdf, 0xed, 0x7e, 0xcd, 0x3d, 0xb7, 0xfb, 0xd9, 0xc4, 0xbc, 0x67, 0x1f, + 0xf2, 0xee, 0x37, 0x00, 0x00, 0xff, 0xff, 0x68, 0xf6, 0x11, 0xaa, 0x39, 0x02, 0x00, 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 11c386b002599..a8f35d91a5055 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -4,12 +4,34 @@ package storagepacker; import "google/protobuf/any.proto"; +// Item represents a entry that gets inserted into the storage packer message Item { - string id = 1; - google.protobuf.Any message = 2; + // ID is the UUID to identify the item + string id = 1; + // message is the contents of the item + google.protobuf.Any message = 2; +} + +// BucketV2 is a construct to hold multiple items within itself. This +// abstraction contains multiple buckets of the same kind within itself and +// shares amont them the items that get inserted. When the bucket as a whole +// gets too big to hold more items, the contained buckets gets pushed out only +// to become independent buckets. Hence, this can grow infinitely in terms of +// storage space for items that get inserted. +message BucketV2{ + // Key is the storage path where the bucket gets stored + string key = 1; + // Items holds the items contained within this bucket + map items = 2; + // Buckets are the buckets contained within this bucket + map buckets = 3; + // Sharded indicates if the contained buckets are pushed out or not + bool sharded = 4; + // Size of this bucket in number of bytes + int64 size = 5; } message Bucket { - string key = 1; - repeated Item items = 2; + string key = 1; + repeated Item items = 2; } diff --git a/helper/strutil/strutil.go b/helper/strutil/strutil.go index a77e60d155e8e..d578cba0631d7 100644 --- a/helper/strutil/strutil.go +++ b/helper/strutil/strutil.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "sort" + "strconv" "strings" "github.com/hashicorp/errwrap" @@ -325,3 +326,46 @@ func AppendIfMissing(slice []string, i string) []string { } return append(slice, i) } + +// BitMaskedIndex returns the integer value formed from the given number of most +// significant bits of a given byte slice. 
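A worked example may help pin down the bit arithmetic of the helper defined just below. This is an illustrative Go example test, not part of the patch:

    // Hypothetical example pinning down the bit math: the top 12 bits of
    // {0xAB, 0xCD} are (0xAB << 4) + 0xC == 0xABC.
    func ExampleBitMaskedIndex() {
        idx, _ := BitMaskedIndex([]byte{0xAB, 0xCD}, 12)       // 0xABC as an integer
        hexIdx, _ := BitMaskedIndexHex([]byte{0xAB, 0xCD}, 12) // unpadded hex string
        fmt.Println(idx, hexIdx)
        // Output: 2748 abc
    }
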
+func BitMaskedIndex(input []byte, bitCount int) (int64, error) { + switch { + case len(input) == 0: + return -1, fmt.Errorf("input length is zero") + case bitCount <= 0: + return -1, fmt.Errorf("bit count zero or negative") + case bitCount > len(input)*8: + return -1, fmt.Errorf("input is shorter for the given bit count") + } + + if bitCount < 8 { + return int64(uint8(input[0]) >> uint8(8-bitCount)), nil + } + + decimalVal := int64(uint8(input[0])) + input = input[1:] + bitCount -= 8 + + for bitCount > 8 { + decimalVal = decimalVal*256 + int64(int(input[0])) + bitCount -= 8 + input = input[1:] + } + + decimalVal = decimalVal << uint8(bitCount) + decimalVal += int64(uint8(input[0]) >> uint8(8-bitCount)) + + return decimalVal, nil +} + +// BitMaskedIndexHex returnes the hex value formed from the given number of +// most significant bits of a given byte slice. +func BitMaskedIndexHex(input []byte, bitCount int) (string, error) { + index, err := BitMaskedIndex(input, bitCount) + if err != nil { + return "", err + } + // Convert the value to hex + return strconv.FormatInt(index, 16), nil +} diff --git a/logical/logical_storage.go b/logical/logical_storage.go new file mode 100644 index 0000000000000..eedc9e42fe6ed --- /dev/null +++ b/logical/logical_storage.go @@ -0,0 +1,82 @@ +package logical + +import ( + "context" + "fmt" + + log "github.com/hashicorp/go-hclog" + + "github.com/hashicorp/vault/physical" + "github.com/hashicorp/vault/physical/file" + "github.com/hashicorp/vault/physical/inmem" +) + +type LogicalType string + +const ( + LogicalTypeInmem LogicalType = "inmem" + LogicalTypeFile LogicalType = "file" +) + +type LogicalStorage struct { + logicalType LogicalType + underlying physical.Backend +} + +func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { + entry, err := s.underlying.Get(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return nil, nil + } + return &StorageEntry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }, nil +} + +func (s *LogicalStorage) Put(ctx context.Context, entry *StorageEntry) error { + return s.underlying.Put(ctx, &physical.Entry{ + Key: entry.Key, + Value: entry.Value, + SealWrap: entry.SealWrap, + }) +} + +func (s *LogicalStorage) Delete(ctx context.Context, key string) error { + return s.underlying.Delete(ctx, key) +} + +func (s *LogicalStorage) List(ctx context.Context, prefix string) ([]string, error) { + return s.underlying.List(ctx, prefix) +} + +func (s *LogicalStorage) Underlying() physical.Backend { + return s.underlying +} + +func NewLogicalStorage(logicalType LogicalType, config map[string]string, logger log.Logger) (*LogicalStorage, error) { + s := &LogicalStorage{ + logicalType: logicalType, + } + var err error + switch logicalType { + case LogicalTypeInmem: + s.underlying, err = inmem.NewInmem(nil, nil) + if err != nil { + return nil, err + } + case LogicalTypeFile: + s.underlying, err = file.NewFileBackend(config, logger) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unsupported logical type %q", logicalType) + } + + return s, nil +} diff --git a/vault/identity_store.go b/vault/identity_store.go index 6faddace586d1..68d4bb7c74afb 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -42,12 +42,12 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo core: core, } - iStore.entityPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, "") + iStore.entityPacker, err = 
storagepacker.NewStoragePackerV1(iStore.view, iStore.logger, "") if err != nil { return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } - iStore.groupPacker, err = storagepacker.NewStoragePacker(iStore.view, iStore.logger, groupBucketsPrefix) + iStore.groupPacker, err = storagepacker.NewStoragePackerV1(iStore.view, iStore.logger, groupBucketsPrefix) if err != nil { return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err) } @@ -82,7 +82,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { switch { // Check if the key is a storage entry key for an entity bucket - case strings.HasPrefix(key, storagepacker.StoragePackerBucketsPrefix): + case strings.HasPrefix(key, storagepacker.DefaultStoragePackerBucketsPrefix): // Get the hash value of the storage bucket entry key bucketKeyHash := i.entityPacker.BucketKeyHashByKey(key) if len(bucketKeyHash) == 0 { diff --git a/vault/identity_store_structs.go b/vault/identity_store_structs.go index 0f9435cf7fc3d..63826e198cbaf 100644 --- a/vault/identity_store_structs.go +++ b/vault/identity_store_structs.go @@ -64,11 +64,11 @@ type IdentityStore struct { // entityPacker is used to pack multiple entity storage entries into 256 // buckets - entityPacker *storagepacker.StoragePacker + entityPacker *storagepacker.StoragePackerV1 // groupPacker is used to pack multiple group storage entries into 256 // buckets - groupPacker *storagepacker.StoragePacker + groupPacker *storagepacker.StoragePackerV1 // core is the pointer to Vault's core core *Core diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index e8b4cc1b0a0d2..37d72dab2642f 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -94,7 +94,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { func (i *IdentityStore) loadEntities(ctx context.Context) error { // Accumulate existing entities i.logger.Debug("loading entities") - existing, err := i.entityPacker.View().List(ctx, storagepacker.StoragePackerBucketsPrefix) + existing, err := i.entityPacker.View().List(ctx, storagepacker.DefaultStoragePackerBucketsPrefix) if err != nil { return errwrap.Wrapf("failed to scan for entities: {{err}}", err) } From c62dc6b39170e21a6f25f19c814fde13aa1bff29 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Mon, 7 May 2018 10:54:34 -0400 Subject: [PATCH 02/38] Added packer walk --- helper/storagepacker/storagepacker_v2.go | 23 +++++++++++++++---- helper/storagepacker/storagepacker_v2_test.go | 4 ++-- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 01bc9eb37573f..94b4a861de3bd 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -40,11 +40,12 @@ type Config struct { // Logger for output Logger log.Logger - // BucketBaseCount is the number of buckets to create at the base level + // BucketBaseCount is the number of buckets to create at the base level. + // The value should be a power of 2. BucketBaseCount int // BucketShardCount is the number of sub-buckets a bucket gets sharded into - // when it reaches the maximum threshold + // when it reaches the maximum threshold. The value should be a power of 2. BucketShardCount int // BucketMaxSize (in bytes) is the maximum allowed size per bucket. 
When @@ -575,12 +576,26 @@ func isPowerOfTwo(val int) bool { return val != 0 && (val&(val-1) == 0) } -type BucketWalkFunc func(item *Item) error +type WalkFunc func(item *Item) error + +// Walk traverses through all the buckets and all the items in each bucket and +// invokes the given function on each item. +func (s *StoragePackerV2) Walk(fn WalkFunc) error { + var err error + for base := 0; base < s.config.BucketBaseCount; base++ { + baseKey := s.config.ViewPrefix + strconv.FormatInt(int64(base), 16) + err = s.BucketWalk(baseKey, fn) + if err != nil { + return err + } + } + return nil +} // BucketWalk is a pre-order traversal of the bucket hierarchy starting from // the bucket corresponding to the given key. The function fn will be called on // all the items in the hierarchy. -func (s *StoragePackerV2) BucketWalk(key string, fn BucketWalkFunc) error { +func (s *StoragePackerV2) BucketWalk(key string, fn WalkFunc) error { bucket, err := s.GetBucket(key) if err != nil { return err diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.go index 6ecad91d4a46e..6a7adca7f9cfd 100644 --- a/helper/storagepacker/storagepacker_v2_test.go +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -16,7 +16,7 @@ import ( ) const ( - testIterationCount = 30 + testIterationCount = 5000 testBucketBaseCount = defaultBucketBaseCount testBucketShardCount = defaultBucketShardCount testBucketMaxSize = defaultBucketMaxSize @@ -154,7 +154,7 @@ func testGetItem(t *testing.T, sp *StoragePackerV2, expectNil bool) { switch expectNil { case itemFetched == nil: - return + continue default: t.Fatalf("expected nil for item %q\n", id) } From bb2ae178a6506888abf71a159405f450dc196ce9 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Mon, 7 May 2018 11:12:50 -0400 Subject: [PATCH 03/38] test for walk func --- helper/storagepacker/storagepacker_v2.go | 8 ++-- helper/storagepacker/storagepacker_v2_test.go | 37 +++++++++++++++++++ 2 files changed, 41 insertions(+), 4 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 94b4a861de3bd..57fbd8e660cbe 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -584,7 +584,7 @@ func (s *StoragePackerV2) Walk(fn WalkFunc) error { var err error for base := 0; base < s.config.BucketBaseCount; base++ { baseKey := s.config.ViewPrefix + strconv.FormatInt(int64(base), 16) - err = s.BucketWalk(baseKey, fn) + err = s.bucketWalk(baseKey, fn) if err != nil { return err } @@ -592,10 +592,10 @@ func (s *StoragePackerV2) Walk(fn WalkFunc) error { return nil } -// BucketWalk is a pre-order traversal of the bucket hierarchy starting from +// bucketWalk is a pre-order traversal of the bucket hierarchy starting from // the bucket corresponding to the given key. The function fn will be called on // all the items in the hierarchy. 
-func (s *StoragePackerV2) BucketWalk(key string, fn WalkFunc) error { +func (s *StoragePackerV2) bucketWalk(key string, fn WalkFunc) error { bucket, err := s.GetBucket(key) if err != nil { return err @@ -618,7 +618,7 @@ func (s *StoragePackerV2) BucketWalk(key string, fn WalkFunc) error { for i := 0; i < s.config.BucketShardCount; i++ { shardKey := bucket.Key + "/" + strconv.FormatInt(int64(i), 16) - err = s.BucketWalk(shardKey, fn) + err = s.bucketWalk(shardKey, fn) if err != nil { return err } diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.go index 6a7adca7f9cfd..5c9ce168abc69 100644 --- a/helper/storagepacker/storagepacker_v2_test.go +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -22,6 +22,43 @@ const ( testBucketMaxSize = defaultBucketMaxSize ) +func TestStoragePackerV2_Walk(t *testing.T) { + sp, err := NewStoragePackerV2(&Config{ + BucketBaseCount: testBucketBaseCount, + BucketShardCount: testBucketShardCount, + BucketMaxSize: testBucketMaxSize, + View: &logical.InmemStorage{}, + Logger: logging.NewVaultLogger(log.Trace), + }) + if err != nil { + t.Fatal(err) + } + + entity := &identity.Entity{ + Metadata: map[string]string{ + "samplekey1": "samplevalue1", + "samplekey2": "samplevalue2", + "samplekey3": "samplevalue3", + "samplekey4": "samplevalue4", + "samplekey5": "samplevalue5", + }, + } + + testPutItem(t, sp, entity) + + collected := []string{} + + walkFunc := func(item *Item) error { + collected = append(collected, item.ID) + return nil + } + + sp.Walk(walkFunc) + if len(collected) != testIterationCount { + t.Fatalf("unable to walk on all the items in the packer") + } +} + func TestStoragePackerV2_Inmem(t *testing.T) { sp, err := NewStoragePackerV2(&Config{ BucketBaseCount: testBucketBaseCount, From 55d4ce7c67429524d210e43ad9d337c5d3807fff Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Mon, 7 May 2018 11:33:49 -0400 Subject: [PATCH 04/38] tests for bits needed --- helper/storagepacker/storagepacker_v2.go | 17 +++++++--- helper/storagepacker/storagepacker_v2_test.go | 34 ++++++++++++++++--- 2 files changed, 42 insertions(+), 9 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 57fbd8e660cbe..18c37d5cb637f 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -559,7 +559,18 @@ func (s *StoragePackerV2) shardBucketIndex(itemID string, depth int) (string, er // bitsNeeded returns the minimum number of bits required to enumerate the // natural numbers below the given value func bitsNeeded(value int) int { - return int(math.Ceil(math.Log2(float64(value)))) + if value < 2 { + return 1 + } + bitCount := int(math.Ceil(math.Log2(float64(value)))) + if isPowerOfTwo(value) { + bitCount++ + } + return bitCount +} + +func isPowerOfTwo(val int) bool { + return val != 0 && (val&(val-1) == 0) } func (s *StoragePackerV2) newBucket(key string) *LockedBucket { @@ -572,10 +583,6 @@ func (s *StoragePackerV2) newBucket(key string) *LockedBucket { } } -func isPowerOfTwo(val int) bool { - return val != 0 && (val&(val-1) == 0) -} - type WalkFunc func(item *Item) error // Walk traverses through all the buckets and all the items in each bucket and diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.go index 5c9ce168abc69..9374408ba0167 100644 --- a/helper/storagepacker/storagepacker_v2_test.go +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -16,12 +16,38 @@ import ( ) 
const ( - testIterationCount = 5000 - testBucketBaseCount = defaultBucketBaseCount - testBucketShardCount = defaultBucketShardCount - testBucketMaxSize = defaultBucketMaxSize + testIterationCount = 5000 + //testBucketBaseCount = defaultBucketBaseCount + //testBucketShardCount = defaultBucketShardCount + testBucketMaxSize = defaultBucketMaxSize + + testBucketBaseCount = 1 + testBucketShardCount = 2 ) +func TestStoragePacker_bitsNeeded(t *testing.T) { + testData := map[int]int{ + -1: 1, + 0: 1, + 1: 1, + 2: 2, + 3: 2, + 4: 3, + 7: 3, + 8: 4, + 15: 4, + 16: 5, + 25: 5, + 32: 6, + 64: 7, + } + for value, expected := range testData { + if bitsNeeded(value) != expected { + t.Fatalf("expected bit count of %d for %d", expected, value) + } + } +} + func TestStoragePackerV2_Walk(t *testing.T) { sp, err := NewStoragePackerV2(&Config{ BucketBaseCount: testBucketBaseCount, From d2aeb5e527f486210ff957284faef0c82e7e639f Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Mon, 7 May 2018 11:55:34 -0400 Subject: [PATCH 05/38] add comment --- helper/storagepacker/storagepacker_v2.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 18c37d5cb637f..674cae3de40a7 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -569,6 +569,8 @@ func bitsNeeded(value int) int { return bitCount } +// isPowerOfTwo returns true if the given value is a power of two, false +// otherwise. func isPowerOfTwo(val int) bool { return val != 0 && (val&(val-1) == 0) } From b26a916fceb22f8f667bf11675a1b8fdd37dc80b Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 30 Jan 2019 17:12:41 -0500 Subject: [PATCH 06/38] Update max size --- helper/storagepacker/storagepacker_v2.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 674cae3de40a7..0962556803ad2 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -23,11 +23,11 @@ const ( defaultBucketShardCount = 16 // Larger size of the bucket size adversely affects the performance of the // storage packer. Also, some of the backends impose a maximum size limit - // on the objects that gets persisted. For example, Consul imposes 512KB + // on the objects that gets persisted. For example, Consul imposes 256KB if using transactions // and DynamoDB imposes 400KB. Going forward, if there exists storage // backends that has more constrained limits, this will have to become more - // flexible. For now, 380KB seems like a decent bargain. - defaultBucketMaxSize = 380 * 1024 + // flexible. For now, 240KB seems like a decent value. 
+ defaultBucketMaxSize = 240 * 1024 ) type Config struct { From e783980f81574b061a23cabd30a3d2cce8d705e6 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 30 Jan 2019 18:41:14 -0500 Subject: [PATCH 07/38] First cut at migrating things over --- helper/storagepacker/storagepacker_v1.go | 182 +++++++++++++++--- ...agepacker_v2.go => storagepacker_v2.notgo} | 103 ---------- ...v2_test.go => storagepacker_v2_test.notgo} | 0 helper/storagepacker/types.pb.go | 149 ++++++-------- helper/storagepacker/types.proto | 17 +- vault/identity_store.go | 30 ++- vault/identity_store_util.go | 67 ++++++- 7 files changed, 301 insertions(+), 247 deletions(-) rename helper/storagepacker/{storagepacker_v2.go => storagepacker_v2.notgo} (80%) rename helper/storagepacker/{storagepacker_v2_test.go => storagepacker_v2_test.notgo} (100%) diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index 81b18a79cbc97..5b80f2822475b 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -7,7 +7,9 @@ import ( "fmt" "strconv" "strings" + "sync" + radix "github.com/armon/go-radix" "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" @@ -16,26 +18,76 @@ import ( "github.com/hashicorp/vault/logical" ) +type HashType uint + const ( - bucketCount = 256 + HashTypeBlake2b256 HashType = iota + HashTypeMD5 +) + +const ( + defaultBucketBaseCount = 256 + defaultBucketShardCount = 16 + // Larger size of the bucket size adversely affects the performance of the + // storage packer. Also, some of the backends impose a maximum size limit + // on the objects that gets persisted. For example, Consul imposes 256KB if using transactions + // and DynamoDB imposes 400KB. Going forward, if there exists storage + // backends that has more constrained limits, this will have to become more + // flexible. For now, 240KB seems like a decent value. + defaultBucketMaxSize = 240 * 1024 + DefaultStoragePackerBucketsPrefix = "packer/buckets/" ) -// StoragePackerV1 packs the objects into a specific number of buckets by -// hashing its ID and indexing it. Currently this supports only 256 bucket -// entries and hence relies on the first byte of the hash value for indexing. -// This is only here for backwards compatibility. Use StoragePackerV2 for any -// newer implementations which allows for infinite storage capacity. +type Config struct { + // View is the storage to be used by all the buckets + View logical.Storage + + // ViewPrefix is the prefix to be used for the buckets in the view + ViewPrefix string + + // Logger for output + Logger log.Logger + + // BucketBaseCount is the number of buckets to create at the base level. + // The value should be a power of 2. + BucketBaseCount int + + // BucketShardCount is the number of sub-buckets a bucket gets sharded into + // when it reaches the maximum threshold. The value should be a power of 2. + BucketShardCount int + + // BucketMaxSize (in bytes) is the maximum allowed size per bucket. When + // the size of the bucket reaches a threshold relative to this limit, it + // gets sharded into the configured number of pieces incrementally. + BucketMaxSize int64 + + // The hash type to use at the base bucket level. Shards always use blake. + // For backwards compat. + BaseHashType HashType +} + +// StoragePacker packs many items into abstractions called buckets. The goal +// is to employ a reduced number of storage entries for a relatively huge +// number of items. 
This is the second version of the utility which supports +// indefinitely expanding the capacity of the storage by sharding the buckets +// when they exceed the imposed limit. type StoragePackerV1 struct { - view logical.Storage - logger log.Logger + *Config storageLocks []*locksutil.LockEntry - viewPrefix string + bucketsCache *radix.Tree +} + +// LockedBucket embeds a bucket and its corresponding lock to ensure thread +// safety +type LockedBucket struct { + *Bucket + lock sync.RWMutex } // BucketPath returns the storage entry key for a given bucket key func (s *StoragePackerV1) BucketPath(bucketKey string) string { - return s.viewPrefix + bucketKey + return s.ViewPrefix + bucketKey } // BucketKeyHash returns the MD5 hash of the bucket storage key in which @@ -53,8 +105,8 @@ func (s *StoragePackerV1) BucketKeyHashByKey(bucketKey string) string { } // View returns the storage view configured to be used by the packer -func (s *StoragePackerV1) View() logical.Storage { - return s.view +func (s *StoragePackerV1) StorageView() logical.Storage { + return s.View } // Get returns a bucket for a given key @@ -68,7 +120,7 @@ func (s *StoragePackerV1) GetBucket(key string) (*Bucket, error) { defer lock.RUnlock() // Read from the underlying view - storageEntry, err := s.view.Get(context.Background(), key) + storageEntry, err := s.View.Get(context.Background(), key) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) } @@ -155,7 +207,7 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { bucketPath := s.BucketPath(bucketKey) // Read from underlying view - storageEntry, err := s.view.Get(context.Background(), bucketPath) + storageEntry, err := s.View.Get(context.Background(), bucketPath) if err != nil { return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } @@ -211,8 +263,8 @@ func (s *StoragePackerV1) PutBucket(bucket *Bucket) error { return fmt.Errorf("missing key") } - if !strings.HasPrefix(bucket.Key, s.viewPrefix) { - return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.viewPrefix) + if !strings.HasPrefix(bucket.Key, s.ViewPrefix) { + return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.ViewPrefix) } marshaledBucket, err := proto.Marshal(bucket) @@ -228,7 +280,7 @@ func (s *StoragePackerV1) PutBucket(bucket *Bucket) error { } // Store the compressed value - err = s.view.Put(context.Background(), &logical.StorageEntry{ + err = s.View.Put(context.Background(), &logical.StorageEntry{ Key: bucket.Key, Value: compressedBucket, }) @@ -294,7 +346,7 @@ func (s *StoragePackerV1) PutItem(item *Item) error { defer lock.Unlock() // Check if there is an existing bucket for a given key - storageEntry, err := s.view.Get(context.Background(), bucketPath) + storageEntry, err := s.View.Get(context.Background(), bucketPath) if err != nil { return errwrap.Wrapf("failed to read packed storage bucket entry: {{err}}", err) } @@ -330,26 +382,102 @@ func (s *StoragePackerV1) PutItem(item *Item) error { } // NewStoragePackerV1 creates a new storage packer for a given view -func NewStoragePackerV1(view logical.Storage, logger log.Logger, viewPrefix string) (*StoragePackerV1, error) { - if view == nil { +func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1, error) { + if config.View == nil { return nil, fmt.Errorf("nil view") } - if viewPrefix == "" { - viewPrefix = DefaultStoragePackerBucketsPrefix + if config.ViewPrefix == "" { + config.ViewPrefix = 
DefaultStoragePackerBucketsPrefix + } + + if !strings.HasSuffix(config.ViewPrefix, "/") { + config.ViewPrefix = config.ViewPrefix + "/" + } + + if config.BucketBaseCount == 0 { + config.BucketBaseCount = defaultBucketBaseCount + } + + // At this point, look for an existing saved configuration + var needPersist bool + entry, err := config.View.Get(ctx, config.ViewPrefix+"config") + if err != nil { + return nil, errwrap.Wrapf("error checking for existing storagepacker config: {{err}}", err) + } + if entry != nil { + needPersist = false + var exist Config + if err := entry.DecodeJSON(&exist); err != nil { + return nil, errwrap.Wrapf("error decoding existing storagepacker config: {{err}}", err) + } + // If we have an existing config, we copy the only two things we need + // constant: + // + // 1. The bucket base count, so we know how many to expect + // 2. The base hash type. We need to know how to hash at the base. All + // shards will use Blake. + // + // The rest of the values can change; the max size can change based on + // e.g. if storage is migrated, so as long as we don't move to a new + // location with a smaller value we're fine (and even then we're fine + // if we can read it; otherwise storage migration would have failed + // anyways). The shard count is recorded in each bucket at the time + // it's sharded; if we realize it's more efficient to do some other + // value later we can update it and use that going forward for new + // shards. + config.BucketBaseCount = exist.BucketBaseCount + config.BaseHashType = exist.BaseHashType + } + + if config.BucketShardCount == 0 { + config.BucketShardCount = defaultBucketShardCount + } + + if config.BucketMaxSize == 0 { + config.BucketMaxSize = defaultBucketMaxSize + } + + if !isPowerOfTwo(config.BucketBaseCount) { + return nil, fmt.Errorf("bucket base count of %d is not a power of two", config.BucketBaseCount) } - if !strings.HasSuffix(viewPrefix, "/") { - viewPrefix = viewPrefix + "/" + if !isPowerOfTwo(config.BucketShardCount) { + return nil, fmt.Errorf("bucket shard count of %d is not a power of two", config.BucketShardCount) + } + + if config.BucketShardCount < 2 { + return nil, fmt.Errorf("bucket shard count should at least be 2") + } + + if needPersist { + entry, err := logical.StorageEntryJSON(config.ViewPrefix+"config", config) + if err != nil { + return nil, errwrap.Wrapf("error encoding storagepacker config: {{err}}", err) + } + if err := config.View.Put(ctx, entry); err != nil { + return nil, errwrap.Wrapf("error storing storagepacker config: {{err}}", err) + } } // Create a new packer object for the given view packer := &StoragePackerV1{ - view: view, - viewPrefix: viewPrefix, - logger: logger, + Config: config, + bucketsCache: radix.New(), storageLocks: locksutil.CreateLocks(), } return packer, nil } + +// isPowerOfTwo returns true if the given value is a power of two, false +// otherwise. We also return false on 1 because there'd be no point. 
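(For readers who have not seen the check below: a positive power of two has exactly one bit set, so subtracting one clears that bit and sets every lower bit, which makes the bitwise AND zero. For example, 8&7 is 0b1000 & 0b0111 = 0, whereas 6&5 is 0b110 & 0b101 = 0b100, which is non-zero, so 6 is correctly rejected.)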
+func isPowerOfTwo(val int) bool { + switch val { + case 0, 1: + return false + default: + return val&(val-1) == 0 + } + return false +} diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.notgo similarity index 80% rename from helper/storagepacker/storagepacker_v2.go rename to helper/storagepacker/storagepacker_v2.notgo index 0962556803ad2..83b96e04c180d 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.notgo @@ -6,118 +6,15 @@ import ( "math" "strconv" "strings" - "sync" - radix "github.com/armon/go-radix" "github.com/golang/protobuf/proto" "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/strutil" - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/cryptoutil" "github.com/hashicorp/vault/logical" ) -const ( - defaultBucketBaseCount = 256 - defaultBucketShardCount = 16 - // Larger size of the bucket size adversely affects the performance of the - // storage packer. Also, some of the backends impose a maximum size limit - // on the objects that gets persisted. For example, Consul imposes 256KB if using transactions - // and DynamoDB imposes 400KB. Going forward, if there exists storage - // backends that has more constrained limits, this will have to become more - // flexible. For now, 240KB seems like a decent value. - defaultBucketMaxSize = 240 * 1024 -) - -type Config struct { - // View is the storage to be used by all the buckets - View logical.Storage - - // ViewPrefix is the prefix to be used for the buckets in the view - ViewPrefix string - - // Logger for output - Logger log.Logger - - // BucketBaseCount is the number of buckets to create at the base level. - // The value should be a power of 2. - BucketBaseCount int - - // BucketShardCount is the number of sub-buckets a bucket gets sharded into - // when it reaches the maximum threshold. The value should be a power of 2. - BucketShardCount int - - // BucketMaxSize (in bytes) is the maximum allowed size per bucket. When - // the size of the bucket reaches a threshold relative to this limit, it - // gets sharded into the configured number of pieces incrementally. - BucketMaxSize int64 -} - -// StoragePackerV2 packs many items into abstractions called buckets. The goal -// is to employ a reduced number of storage entries for a relatively huge -// number of items. This is the second version of the utility which supports -// indefinitely expanding the capacity of the storage by sharding the buckets -// when they exceed the imposed limit. 
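To make the sharding idea in the comment above concrete: once a bucket's serialized size crosses the configured maximum, its items get redistributed into sub-buckets addressed by the next few bits of each item's hash (one hex character for the default shard count of 16). The sketch below is only an illustration of that redistribution step; the function name is invented and SHA-256 stands in for the Blake2b-256 hash the packer actually uses.

    package main

    import (
        "crypto/sha256" // stand-in for the packer's Blake2b-256 hash
        "encoding/hex"
        "fmt"
    )

    // shardItems splits the item IDs living under bucketKey into sub-buckets
    // keyed by the next hex character of each item's hash. depth is how many
    // hex characters of the hash the parent bucket key already consumed.
    func shardItems(bucketKey string, depth int, itemIDs []string) map[string][]string {
        shards := make(map[string][]string)
        for _, id := range itemIDs {
            sum := sha256.Sum256([]byte(id))
            hexVal := hex.EncodeToString(sum[:])
            shardKey := bucketKey + "/" + hexVal[depth:depth+1]
            shards[shardKey] = append(shards[shardKey], id)
        }
        return shards
    }

    func main() {
        for key, ids := range shardItems("packer/buckets/00", 2, []string{"entity-a", "entity-b", "entity-c"}) {
            fmt.Println(key, ids)
        }
    }
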
-type StoragePackerV2 struct { - config *Config - bucketsCache *radix.Tree -} - -// LockedBucket embeds a bucket and its corresponding lock to ensure thread -// safety -type LockedBucket struct { - *BucketV2 - lock sync.RWMutex -} - -// NewStoragePackerV2 creates a new storage packer for a given view -func NewStoragePackerV2(config *Config) (*StoragePackerV2, error) { - if config.View == nil { - return nil, fmt.Errorf("nil view") - } - - if config.ViewPrefix == "" { - config.ViewPrefix = DefaultStoragePackerBucketsPrefix - } - - if !strings.HasSuffix(config.ViewPrefix, "/") { - config.ViewPrefix = config.ViewPrefix + "/" - } - - if config.BucketBaseCount == 0 { - config.BucketBaseCount = defaultBucketBaseCount - } - - if config.BucketShardCount == 0 { - config.BucketShardCount = defaultBucketShardCount - } - - if config.BucketMaxSize == 0 { - config.BucketMaxSize = defaultBucketMaxSize - } - - if !isPowerOfTwo(config.BucketBaseCount) { - return nil, fmt.Errorf("bucket base count of %d is not a power of two", config.BucketBaseCount) - } - - if !isPowerOfTwo(config.BucketShardCount) { - return nil, fmt.Errorf("bucket shard count of %d is not a power of two", config.BucketShardCount) - } - - if config.BucketShardCount < 2 { - return nil, fmt.Errorf("bucket shard count should at least be 2") - } - - // Create a new packer object for the given view - packer := &StoragePackerV2{ - config: config, - bucketsCache: radix.New(), - } - - return packer, nil -} - // Clone creates a replica of the bucket func (b *BucketV2) Clone() (*BucketV2, error) { if b == nil { diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.notgo similarity index 100% rename from helper/storagepacker/storagepacker_v2_test.go rename to helper/storagepacker/storagepacker_v2_test.notgo diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 9f24b30760254..6e5554577a797 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -71,167 +71,126 @@ func (m *Item) GetMessage() *any.Any { return nil } -// BucketV2 is a construct to hold multiple items within itself. This +// Bucket is a construct to hold multiple items within itself. This // abstraction contains multiple buckets of the same kind within itself and // shares amont them the items that get inserted. When the bucket as a whole // gets too big to hold more items, the contained buckets gets pushed out only // to become independent buckets. Hence, this can grow infinitely in terms of // storage space for items that get inserted. 
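Since an Item carries its payload as a google.protobuf.Any, callers wrap whatever concrete message they store; the identity store stores identity.Entity and identity.Group values this way. A hedged sketch of that wrapping using the ptypes helpers of this era; the field values are placeholders and the error handling is minimal:

    package main

    import (
        "fmt"
        "log"

        "github.com/golang/protobuf/ptypes"
        "github.com/hashicorp/vault/helper/identity"
        "github.com/hashicorp/vault/helper/storagepacker"
    )

    func main() {
        entity := &identity.Entity{
            ID:   "entity-id-1", // placeholder values for illustration
            Name: "sample-entity",
        }

        // Wrap the concrete message in an Any so the packer can store it
        // without knowing its type.
        anyMsg, err := ptypes.MarshalAny(entity)
        if err != nil {
            log.Fatal(err)
        }

        item := &storagepacker.Item{
            ID:      entity.ID,
            Message: anyMsg,
        }

        // Unwrapping on the way back out of a bucket.
        var restored identity.Entity
        if err := ptypes.UnmarshalAny(item.Message, &restored); err != nil {
            log.Fatal(err)
        }
        fmt.Println(restored.Name)
    }
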
-type BucketV2 struct { +type Bucket struct { // Key is the storage path where the bucket gets stored Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Items holds the items contained within this bucket - Items map[string]*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` // Buckets are the buckets contained within this bucket - Buckets map[string]*BucketV2 `sentinel:"" protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Sharded indicates if the contained buckets are pushed out or not Sharded bool `sentinel:"" protobuf:"varint,4,opt,name=sharded,proto3" json:"sharded,omitempty"` + // The number of shards created in this bucket + ShardCount uint32 `sentinel:"" protobuf:"varint,5,opt,name=shard_count,json=shardCount,proto3" json:"shard_count,omitempty"` // Size of this bucket in number of bytes - Size int64 `sentinel:"" protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` + Size uint32 `sentinel:"" protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *BucketV2) Reset() { *m = BucketV2{} } -func (m *BucketV2) String() string { return proto.CompactTextString(m) } -func (*BucketV2) ProtoMessage() {} -func (*BucketV2) Descriptor() ([]byte, []int) { +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { return fileDescriptor_c0e98c66c4f51b7f, []int{1} } -func (m *BucketV2) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BucketV2.Unmarshal(m, b) +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) } -func (m *BucketV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BucketV2.Marshal(b, m, deterministic) +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) } -func (m *BucketV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_BucketV2.Merge(m, src) +func (m *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(m, src) } -func (m *BucketV2) XXX_Size() int { - return xxx_messageInfo_BucketV2.Size(m) +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) } -func (m *BucketV2) XXX_DiscardUnknown() { - xxx_messageInfo_BucketV2.DiscardUnknown(m) +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) } -var xxx_messageInfo_BucketV2 proto.InternalMessageInfo +var xxx_messageInfo_Bucket proto.InternalMessageInfo -func (m *BucketV2) GetKey() string { +func (m *Bucket) GetKey() string { if m != nil { return m.Key } return "" } -func (m *BucketV2) GetItems() map[string]*Item { +func (m *Bucket) GetItems() []*Item { if m != nil { return m.Items } return nil } -func (m *BucketV2) GetBuckets() map[string]*BucketV2 { +func (m *Bucket) GetBuckets() 
map[string]*Bucket { if m != nil { return m.Buckets } return nil } -func (m *BucketV2) GetSharded() bool { +func (m *Bucket) GetSharded() bool { if m != nil { return m.Sharded } return false } -func (m *BucketV2) GetSize() int64 { +func (m *Bucket) GetShardCount() uint32 { if m != nil { - return m.Size + return m.ShardCount } return 0 } -type Bucket struct { - Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_c0e98c66c4f51b7f, []int{2} -} - -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetKey() string { +func (m *Bucket) GetSize() uint32 { if m != nil { - return m.Key - } - return "" -} - -func (m *Bucket) GetItems() []*Item { - if m != nil { - return m.Items + return m.Size } - return nil + return 0 } func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") - proto.RegisterType((*BucketV2)(nil), "storagepacker.BucketV2") - proto.RegisterMapType((map[string]*BucketV2)(nil), "storagepacker.BucketV2.BucketsEntry") - proto.RegisterMapType((map[string]*Item)(nil), "storagepacker.BucketV2.ItemsEntry") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") + proto.RegisterMapType((map[string]*Bucket)(nil), "storagepacker.Bucket.BucketsEntry") } func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } var fileDescriptor_c0e98c66c4f51b7f = []byte{ - // 343 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x4f, 0x6b, 0xe3, 0x30, - 0x10, 0xc5, 0xb1, 0x9d, 0x7f, 0x3b, 0xd9, 0x5d, 0x16, 0x6d, 0xa1, 0x6a, 0x4e, 0xc6, 0xf4, 0xe0, - 0x1e, 0x2a, 0xd1, 0xf4, 0x12, 0x7a, 0x28, 0x34, 0x90, 0x42, 0x0f, 0xbd, 0xb8, 0xd0, 0x43, 0x6f, - 0xb2, 0x3d, 0xb5, 0x4d, 0xec, 0xc8, 0x48, 0x72, 0xc0, 0xfd, 0xc4, 0xfd, 0x18, 0x25, 0x76, 0x4c, - 0x93, 0xe2, 0xdc, 0x46, 0x9a, 0xf7, 0x1e, 0xbf, 0x19, 0x09, 0xdc, 0x14, 0xf3, 0x12, 0x15, 0xd7, - 0x46, 0x2a, 0x91, 0x60, 0x29, 0xa2, 0x35, 0x2a, 0x6e, 0xea, 0x12, 0x35, 0x2b, 0x95, 0x34, 0x92, - 0xfc, 0x39, 0x6a, 0xcd, 0x2e, 0x12, 0x29, 0x93, 0x1c, 0x79, 0xd3, 0x0c, 0xab, 0x77, 0x2e, 0x36, - 0x75, 0xab, 0xf4, 0x1e, 0x61, 0xf0, 0x64, 0xb0, 0x20, 0x7f, 0xc1, 0xce, 0x62, 0x6a, 0xb9, 0x96, - 0xff, 0x2b, 0xb0, 0xb3, 0x98, 0x30, 0x18, 0x17, 0xa8, 0xb5, 0x48, 0x90, 0xda, 0xae, 0xe5, 0x4f, - 0xe7, 0x67, 0xac, 0x0d, 0x61, 0x5d, 0x08, 0x7b, 0xd8, 0xd4, 0x41, 0x27, 0xf2, 0x3e, 0x6d, 0x98, - 0x2c, 0xab, 0x68, 0x8d, 0xe6, 0x75, 0x4e, 0xfe, 0x81, 0xb3, 0xc6, 0x7a, 0x9f, 0xb6, 0x2b, 0xc9, - 0x02, 0x86, 0x99, 0xc1, 0x42, 0x53, 0xdb, 0x75, 0xfc, 0xe9, 0xdc, 0x63, 0x47, 0x80, 0xac, 0x73, - 0xb2, 
0x1d, 0x8b, 0x5e, 0x6d, 0x8c, 0xaa, 0x83, 0xd6, 0x40, 0xee, 0x61, 0x1c, 0x36, 0x5d, 0x4d, - 0x9d, 0xc6, 0x7b, 0x79, 0xca, 0xdb, 0x16, 0x7b, 0x77, 0x67, 0x22, 0x14, 0xc6, 0x3a, 0x15, 0x2a, - 0xc6, 0x98, 0x0e, 0x5c, 0xcb, 0x9f, 0x04, 0xdd, 0x91, 0x10, 0x18, 0xe8, 0xec, 0x03, 0xe9, 0xd0, - 0xb5, 0x7c, 0x27, 0x68, 0xea, 0xd9, 0x33, 0xc0, 0x37, 0x42, 0xcf, 0x1c, 0x57, 0x30, 0xdc, 0x8a, - 0xbc, 0xea, 0x96, 0xf2, 0xff, 0x07, 0xcb, 0xce, 0x1b, 0xb4, 0x8a, 0x3b, 0x7b, 0x61, 0xcd, 0x5e, - 0xe0, 0xf7, 0x21, 0x55, 0x4f, 0xe0, 0xf5, 0x71, 0xe0, 0xf9, 0x89, 0xe1, 0x0e, 0x42, 0xbd, 0x15, - 0x8c, 0xda, 0xeb, 0x7e, 0xbe, 0xc3, 0x3d, 0xf7, 0xf3, 0x35, 0x8a, 0xe5, 0xcd, 0x1b, 0x4f, 0x32, - 0x93, 0x56, 0x21, 0x8b, 0x64, 0xc1, 0x53, 0xa1, 0xd3, 0x2c, 0x92, 0xaa, 0xe4, 0x5b, 0x51, 0xe5, - 0x86, 0xf7, 0x7d, 0xb1, 0x70, 0xd4, 0xbc, 0xfd, 0xed, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfb, - 0x19, 0x15, 0xef, 0x81, 0x02, 0x00, 0x00, + // 321 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xf4, 0x30, + 0x10, 0xc6, 0x69, 0xf7, 0xdf, 0xfb, 0xce, 0xba, 0x22, 0x51, 0x21, 0xee, 0xc5, 0xb2, 0xa7, 0x8a, + 0x90, 0xe0, 0x7a, 0x11, 0xf1, 0xe2, 0x8a, 0x82, 0x47, 0x73, 0xf4, 0x22, 0x69, 0x3b, 0xb6, 0x65, + 0xdb, 0xa6, 0x24, 0xe9, 0x42, 0xfd, 0x3e, 0x7e, 0x4f, 0xd9, 0x74, 0x0b, 0xae, 0xec, 0xa9, 0xd3, + 0x79, 0x7e, 0x79, 0xe6, 0xc9, 0x04, 0x82, 0x0c, 0x8b, 0x1a, 0x35, 0x37, 0x56, 0x69, 0x99, 0x62, + 0x2d, 0xe3, 0x35, 0x6a, 0x6e, 0xdb, 0x1a, 0x0d, 0xab, 0xb5, 0xb2, 0x8a, 0xcc, 0xf6, 0xa4, 0xf9, + 0x45, 0xaa, 0x54, 0x5a, 0x20, 0x77, 0x62, 0xd4, 0x7c, 0x72, 0x59, 0xb5, 0x1d, 0xb9, 0x78, 0x81, + 0xe1, 0xab, 0xc5, 0x92, 0x1c, 0x83, 0x9f, 0x27, 0xd4, 0x0b, 0xbc, 0xf0, 0xbf, 0xf0, 0xf3, 0x84, + 0x30, 0x98, 0x94, 0x68, 0x8c, 0x4c, 0x91, 0xfa, 0x81, 0x17, 0x4e, 0x97, 0x67, 0xac, 0x33, 0x61, + 0xbd, 0x09, 0x7b, 0xac, 0x5a, 0xd1, 0x43, 0x8b, 0x6f, 0x1f, 0xc6, 0xab, 0x26, 0x5e, 0xa3, 0x25, + 0x27, 0x30, 0x58, 0x63, 0xbb, 0xf3, 0xda, 0x96, 0xe4, 0x0a, 0x46, 0xb9, 0xc5, 0xd2, 0x50, 0x3f, + 0x18, 0x84, 0xd3, 0xe5, 0x29, 0xdb, 0x8b, 0xc7, 0xb6, 0x01, 0x44, 0x47, 0x90, 0x07, 0x98, 0x44, + 0xce, 0xc6, 0xd0, 0x81, 0x83, 0x17, 0x7f, 0xe0, 0x6e, 0xc8, 0xee, 0x63, 0x9e, 0x2b, 0xab, 0x5b, + 0xd1, 0x1f, 0x21, 0x14, 0x26, 0x26, 0x93, 0x3a, 0xc1, 0x84, 0x0e, 0x03, 0x2f, 0xfc, 0x27, 0xfa, + 0x5f, 0x72, 0x09, 0x53, 0x57, 0x7e, 0xc4, 0xaa, 0xa9, 0x2c, 0x1d, 0x05, 0x5e, 0x38, 0x13, 0xe0, + 0x5a, 0x4f, 0xdb, 0x0e, 0x21, 0x30, 0x34, 0xf9, 0x17, 0xd2, 0xb1, 0x53, 0x5c, 0x3d, 0x7f, 0x83, + 0xa3, 0xdf, 0x73, 0x0e, 0xdc, 0xec, 0x1a, 0x46, 0x1b, 0x59, 0x34, 0xfd, 0x92, 0xce, 0x0f, 0x86, + 0x15, 0x1d, 0x73, 0xef, 0xdf, 0x79, 0xab, 0x9b, 0x77, 0x9e, 0xe6, 0x36, 0x6b, 0x22, 0x16, 0xab, + 0x92, 0x67, 0xd2, 0x64, 0x79, 0xac, 0x74, 0xcd, 0x37, 0xb2, 0x29, 0x2c, 0x3f, 0xf4, 0xb0, 0xd1, + 0xd8, 0x6d, 0xfc, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb0, 0x6d, 0x06, 0xf7, 0x01, 0x00, + 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 7c2bc4bf6a093..0683210e31fce 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -14,26 +14,23 @@ message Item { google.protobuf.Any message = 2; } -// BucketV2 is a construct to hold multiple items within itself. This +// Bucket is a construct to hold multiple items within itself. This // abstraction contains multiple buckets of the same kind within itself and // shares amont them the items that get inserted. 
When the bucket as a whole // gets too big to hold more items, the contained buckets gets pushed out only // to become independent buckets. Hence, this can grow infinitely in terms of // storage space for items that get inserted. -message BucketV2{ +message Bucket { // Key is the storage path where the bucket gets stored string key = 1; // Items holds the items contained within this bucket - map items = 2; + repeated Item items = 2; // Buckets are the buckets contained within this bucket - map buckets = 3; + map buckets = 3; // Sharded indicates if the contained buckets are pushed out or not bool sharded = 4; + // The number of shards created in this bucket + uint32 shard_count = 5; // Size of this bucket in number of bytes - int64 size = 5; -} - -message Bucket { - string key = 1; - repeated Item items = 2; + uint32 size = 6; } diff --git a/vault/identity_store.go b/vault/identity_store.go index c322fe7305ae2..4f340420cdffe 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -19,7 +19,8 @@ import ( ) const ( - groupBucketsPrefix = "packer/group/buckets/" + entityBucketsPrefix = "packer/buckets/" + groupBucketsPrefix = "packer/group/buckets/" ) var ( @@ -61,12 +62,35 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo core.AddLogger(entitiesPackerLogger) groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups") core.AddLogger(groupsPackerLogger) - iStore.entityPacker, err = storagepacker.NewStoragePackerV1(iStore.view, entitiesPackerLogger, "") + + // If we find buckets, we've already written values so we fall back to md5 + // at the top level for compatibility. If we don't, this is a fresh install + // and we use Blake at the top level. + baseHashType := storagepacker.HashTypeBlake2b256 + vals, err := iStore.view.List(ctx, entityBucketsPrefix) + if err != nil { + return nil, err + } + if len(vals) > 0 { + baseHashType = storagepacker.HashTypeMD5 + } + iStore.entityPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ + View: iStore.view, + ViewPrefix: entityBucketsPrefix, + Logger: entitiesPackerLogger, + BaseHashType: baseHashType, + }) if err != nil { return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } - iStore.groupPacker, err = storagepacker.NewStoragePackerV1(iStore.view, groupsPackerLogger, groupBucketsPrefix) + iStore.groupPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ + View: iStore.view, + ViewPrefix: groupBucketsPrefix, + Logger: groupsPackerLogger, + BaseHashType: baseHashType, + }) + if err != nil { return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err) } diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 0dc983b14d5af..9c29ee8eb58f1 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -75,13 +75,35 @@ func (i *IdentityStore) sanitizeName(name string) string { func (i *IdentityStore) loadGroups(ctx context.Context) error { i.logger.Debug("identity loading groups") - existing, err := i.groupPacker.View().List(ctx, groupBucketsPrefix) - if err != nil { - return errwrap.Wrapf("failed to scan for groups: {{err}}", err) + + allBuckets := make([]string, 0, 257) + + var walkPrefixes func(in string) error + walkPrefixes = func(in string) error { + existing, err := i.groupPacker.StorageView().List(ctx, in) + if err != nil { + return errwrap.Wrapf("failed to scan for groups: {{err}}", err) + } + for _, key := range existing { + if key == "config" { + continue + } + if 
key[len(key)-1] == '/' { + if err := walkPrefixes(key); err != nil { + return err + } + } else { + allBuckets = append(allBuckets, key) + } + } + return nil + } + if err := walkPrefixes(groupBucketsPrefix); err != nil { + return err } - i.logger.Debug("groups collected", "num_existing", len(existing)) - for _, key := range existing { + i.logger.Debug("group buckets collected", "num_existing", len(allBuckets)) + for _, key := range allBuckets { bucket, err := i.groupPacker.GetBucket(i.groupPacker.BucketPath(key)) if err != nil { return err @@ -154,19 +176,46 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { func (i *IdentityStore) loadEntities(ctx context.Context) error { // Accumulate existing entities i.logger.Debug("loading entities") - existing, err := i.entityPacker.View().List(ctx, storagepacker.DefaultStoragePackerBucketsPrefix) + existing, err := i.entityPacker.StorageView().List(ctx, storagepacker.DefaultStoragePackerBucketsPrefix) if err != nil { return errwrap.Wrapf("failed to scan for entities: {{err}}", err) } - i.logger.Debug("entities collected", "num_existing", len(existing)) + + allBuckets := make([]string, 0, 257) + + var walkPrefixes func(in string) error + walkPrefixes = func(in string) error { + existing, err := i.entityPacker.StorageView().List(ctx, in) + if err != nil { + return errwrap.Wrapf("failed to scan for entities: {{err}}", err) + } + for _, key := range existing { + if key == "config" { + continue + } + if key[len(key)-1] == '/' { + if err := walkPrefixes(key); err != nil { + return err + } + } else { + allBuckets = append(allBuckets, key) + } + } + return nil + } + if err := walkPrefixes(entityBucketsPrefix); err != nil { + return err + } + + i.logger.Debug("entity buckets collected", "num_existing", len(allBuckets)) // Make the channels used for the worker pool broker := make(chan string) quit := make(chan bool) // Buffer these channels to prevent deadlocks - errs := make(chan error, len(existing)) - result := make(chan *storagepacker.Bucket, len(existing)) + errs := make(chan error, len(allBuckets)) + result := make(chan *storagepacker.Bucket, len(allBuckets)) // Use a wait group wg := &sync.WaitGroup{} From abed3b390178097d21e621e54f773c01fa52d422 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 30 Jan 2019 18:44:01 -0500 Subject: [PATCH 08/38] Readd old version as legacy --- helper/storagepacker/legacy_storagepacker.go | 354 +++++++++++++++++++ 1 file changed, 354 insertions(+) create mode 100644 helper/storagepacker/legacy_storagepacker.go diff --git a/helper/storagepacker/legacy_storagepacker.go b/helper/storagepacker/legacy_storagepacker.go new file mode 100644 index 0000000000000..7f337c80dd10d --- /dev/null +++ b/helper/storagepacker/legacy_storagepacker.go @@ -0,0 +1,354 @@ +package storagepacker + +import ( + "context" + "crypto/md5" + "encoding/hex" + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/hashicorp/errwrap" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/compressutil" + "github.com/hashicorp/vault/helper/locksutil" + "github.com/hashicorp/vault/logical" +) + +const ( + LegacyStoragePackerBucketsPrefix = "packer/buckets/" +) + +// LegacyStoragePacker packs the objects into a specific number of buckets by hashing +// its ID and indexing it. Currently this supports only 256 bucket entries and +// hence relies on the first byte of the hash value for indexing. The items +// that gets inserted into the packer should implement StorageBucketItem +// interface. 
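The legacy scheme described above pins every item to one of 256 fixed buckets by taking the first byte of the MD5 of its ID, as the BucketIndex and BucketKey methods below show. A small illustration with made-up item IDs:

    package main

    import (
        "crypto/md5"
        "fmt"
        "strconv"
    )

    // legacyBucketKey mirrors BucketIndex/BucketKey from the legacy packer:
    // the first byte of the MD5 of the item ID picks one of 256 buckets.
    func legacyBucketKey(itemID string) string {
        hf := md5.New()
        hf.Write([]byte(itemID))
        return strconv.Itoa(int(hf.Sum(nil)[0]))
    }

    func main() {
        for _, id := range []string{"entity-1", "entity-2", "group-1"} {
            fmt.Printf("%s -> packer/buckets/%s\n", id, legacyBucketKey(id))
        }
    }
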
+type LegacyStoragePacker struct { + view logical.Storage + logger log.Logger + storageLocks []*locksutil.LockEntry + viewPrefix string +} + +// BucketPath returns the storage entry key for a given bucket key +func (s *LegacyStoragePacker) BucketPath(bucketKey string) string { + return s.viewPrefix + bucketKey +} + +// BucketKeyHash returns the MD5 hash of the bucket storage key in which +// the item will be stored. The choice of MD5 is only for hash performance +// reasons since its value is not used for any security sensitive operation. +func (s *LegacyStoragePacker) BucketKeyHashByItemID(itemID string) string { + return s.BucketKeyHashByKey(s.BucketPath(s.BucketKey(itemID))) +} + +// BucketKeyHashByKey returns the MD5 hash of the bucket storage key +func (s *LegacyStoragePacker) BucketKeyHashByKey(bucketKey string) string { + hf := md5.New() + hf.Write([]byte(bucketKey)) + return hex.EncodeToString(hf.Sum(nil)) +} + +// View returns the storage view configured to be used by the packer +func (s *LegacyStoragePacker) View() logical.Storage { + return s.view +} + +// Get returns a bucket for a given key +func (s *LegacyStoragePacker) GetBucket(key string) (*Bucket, error) { + if key == "" { + return nil, fmt.Errorf("missing bucket key") + } + + lock := locksutil.LockForKey(s.storageLocks, key) + lock.RLock() + defer lock.RUnlock() + + // Read from the underlying view + storageEntry, err := s.view.Get(context.Background(), key) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) + } + if storageEntry == nil { + return nil, nil + } + + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return nil, errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + var bucket Bucket + err = proto.Unmarshal(uncompressedData, &bucket) + if err != nil { + return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) + } + + return &bucket, nil +} + +// upsert either inserts a new item into the bucket or updates an existing one +// if an item with a matching key is already present. +func (s *Bucket) legacyUpsert(item *Item) error { + if s == nil { + return fmt.Errorf("nil storage bucket") + } + + if item == nil { + return fmt.Errorf("nil item") + } + + if item.ID == "" { + return fmt.Errorf("missing item ID") + } + + // Look for an item with matching key and don't modify the collection + // while iterating + foundIdx := -1 + for itemIdx, bucketItems := range s.Items { + if bucketItems.ID == item.ID { + foundIdx = itemIdx + break + } + } + + // If there is no match, append the item, otherwise update it + if foundIdx == -1 { + s.Items = append(s.Items, item) + } else { + s.Items[foundIdx] = item + } + + return nil +} + +// BucketIndex returns the bucket key index for a given storage key +func (s *LegacyStoragePacker) BucketIndex(key string) uint8 { + hf := md5.New() + hf.Write([]byte(key)) + return uint8(hf.Sum(nil)[0]) +} + +// BucketKey returns the bucket key for a given item ID +func (s *LegacyStoragePacker) BucketKey(itemID string) string { + return strconv.Itoa(int(s.BucketIndex(itemID))) +} + +// DeleteItem removes the storage entry which the given key refers to from its +// corresponding bucket. 
+func (s *LegacyStoragePacker) DeleteItem(itemID string) error { + + if itemID == "" { + return fmt.Errorf("empty item ID") + } + + // Get the bucket key + bucketKey := s.BucketKey(itemID) + + // Prepend the view prefix + bucketPath := s.BucketPath(bucketKey) + + // Read from underlying view + storageEntry, err := s.view.Get(context.Background(), bucketPath) + if err != nil { + return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) + } + if storageEntry == nil { + return nil + } + + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + var bucket Bucket + err = proto.Unmarshal(uncompressedData, &bucket) + if err != nil { + return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) + } + + // Look for a matching storage entry + foundIdx := -1 + for itemIdx, item := range bucket.Items { + if item.ID == itemID { + foundIdx = itemIdx + break + } + } + + // If there is a match, remove it from the collection and persist the + // resulting collection + if foundIdx != -1 { + bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) + + // Persist bucket entry only if there is an update + err = s.PutBucket(&bucket) + if err != nil { + return err + } + } + + return nil +} + +// Put stores a packed bucket entry +func (s *LegacyStoragePacker) PutBucket(bucket *Bucket) error { + if bucket == nil { + return fmt.Errorf("nil bucket entry") + } + + if bucket.Key == "" { + return fmt.Errorf("missing key") + } + + if !strings.HasPrefix(bucket.Key, s.viewPrefix) { + return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.viewPrefix) + } + + marshaledBucket, err := proto.Marshal(bucket) + if err != nil { + return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) + } + + compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{ + Type: compressutil.CompressionTypeSnappy, + }) + if err != nil { + return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) + } + + // Store the compressed value + err = s.view.Put(context.Background(), &logical.StorageEntry{ + Key: bucket.Key, + Value: compressedBucket, + }) + if err != nil { + return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) + } + + return nil +} + +// GetItem fetches the storage entry for a given key from its corresponding +// bucket. 
+func (s *LegacyStoragePacker) GetItem(itemID string) (*Item, error) { + if itemID == "" { + return nil, fmt.Errorf("empty item ID") + } + + bucketKey := s.BucketKey(itemID) + bucketPath := s.BucketPath(bucketKey) + + // Fetch the bucket entry + bucket, err := s.GetBucket(bucketPath) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) + } + if bucket == nil { + return nil, nil + } + + // Look for a matching storage entry in the bucket items + for _, item := range bucket.Items { + if item.ID == itemID { + return item, nil + } + } + + return nil, nil +} + +// PutItem stores a storage entry in its corresponding bucket +func (s *LegacyStoragePacker) PutItem(item *Item) error { + if item == nil { + return fmt.Errorf("nil item") + } + + if item.ID == "" { + return fmt.Errorf("missing ID in item") + } + + var err error + bucketKey := s.BucketKey(item.ID) + bucketPath := s.BucketPath(bucketKey) + + bucket := &Bucket{ + Key: bucketPath, + } + + // In this case, we persist the storage entry regardless of the read + // storageEntry below is nil or not. Hence, directly acquire write lock + // even to read the entry. + lock := locksutil.LockForKey(s.storageLocks, bucketPath) + lock.Lock() + defer lock.Unlock() + + // Check if there is an existing bucket for a given key + storageEntry, err := s.view.Get(context.Background(), bucketPath) + if err != nil { + return errwrap.Wrapf("failed to read packed storage bucket entry: {{err}}", err) + } + + if storageEntry == nil { + // If the bucket entry does not exist, this will be the only item the + // bucket that is going to be persisted. + bucket.Items = []*Item{ + item, + } + } else { + uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + if err != nil { + return errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) + } + if notCompressed { + uncompressedData = storageEntry.Value + } + + err = proto.Unmarshal(uncompressedData, bucket) + if err != nil { + return errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) + } + + err = bucket.legacyUpsert(item) + if err != nil { + return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err) + } + } + + // Persist the result + return s.PutBucket(bucket) +} + +// NewLegacyStoragePacker creates a new storage packer for a given view +func NewLegacyStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*LegacyStoragePacker, error) { + if view == nil { + return nil, fmt.Errorf("nil view") + } + + if viewPrefix == "" { + viewPrefix = LegacyStoragePackerBucketsPrefix + } + + if !strings.HasSuffix(viewPrefix, "/") { + viewPrefix = viewPrefix + "/" + } + + // Create a new packer object for the given view + packer := &LegacyStoragePacker{ + view: view, + viewPrefix: viewPrefix, + logger: logger, + storageLocks: locksutil.CreateLocks(), + } + + return packer, nil +} From 4595b3bac83c43bee5800b5a4214becc0e0232c1 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 15:30:54 -0500 Subject: [PATCH 09/38] More interim work --- helper/cryptoutil/cryptoutil.go | 9 +- helper/storagepacker/storagepacker_v1.go | 529 ++++++++++++++--------- vault/identity_store.go | 79 ++-- vault/identity_store_util.go | 231 +++++----- 4 files changed, 497 insertions(+), 351 deletions(-) diff --git a/helper/cryptoutil/cryptoutil.go b/helper/cryptoutil/cryptoutil.go index 20a59459b1e13..a37086c645d80 100644 --- a/helper/cryptoutil/cryptoutil.go +++ b/helper/cryptoutil/cryptoutil.go @@ 
-2,13 +2,10 @@ package cryptoutil import "golang.org/x/crypto/blake2b" -func Blake2b256Hash(key string) ([]byte, error) { - hf, err := blake2b.New256(nil) - if err != nil { - return nil, err - } +func Blake2b256Hash(key string) []byte { + hf, _ := blake2b.New256(nil) hf.Write([]byte(key)) - return hf.Sum(nil), nil + return hf.Sum(nil) } diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index 5b80f2822475b..3285d800caa82 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -2,10 +2,9 @@ package storagepacker import ( "context" - "crypto/md5" "encoding/hex" + "errors" "fmt" - "strconv" "strings" "sync" @@ -14,20 +13,14 @@ import ( "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/compressutil" + "github.com/hashicorp/vault/helper/cryptoutil" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/logical" ) -type HashType uint - -const ( - HashTypeBlake2b256 HashType = iota - HashTypeMD5 -) - const ( - defaultBucketBaseCount = 256 - defaultBucketShardCount = 16 + defaultBaseBucketBits = 8 + defaultBucketShardBits = 4 // Larger size of the bucket size adversely affects the performance of the // storage packer. Also, some of the backends impose a maximum size limit // on the objects that gets persisted. For example, Consul imposes 256KB if using transactions @@ -35,36 +28,29 @@ const ( // backends that has more constrained limits, this will have to become more // flexible. For now, 240KB seems like a decent value. defaultBucketMaxSize = 240 * 1024 - - DefaultStoragePackerBucketsPrefix = "packer/buckets/" ) type Config struct { - // View is the storage to be used by all the buckets - View logical.Storage + // BucketStorageView is the storage to be used by all the buckets + BucketStorageView *logical.StorageView `json:"-"` - // ViewPrefix is the prefix to be used for the buckets in the view - ViewPrefix string + // ConfigStorageView is the storage to store config info + ConfigStorageView *logical.StorageView `json:"-"` // Logger for output - Logger log.Logger + Logger log.Logger `json:"-"` - // BucketBaseCount is the number of buckets to create at the base level. - // The value should be a power of 2. - BucketBaseCount int + // BaseBucketBits is the number of bits to use for buckets at the base level + BaseBucketBits int `json:"base_bucket_bits"` - // BucketShardCount is the number of sub-buckets a bucket gets sharded into - // when it reaches the maximum threshold. The value should be a power of 2. - BucketShardCount int + // BucketShardBits is the number of bits to use for sub-buckets a bucket + // gets sharded into when it reaches the maximum threshold. + BucketShardBits int `json:"-"` // BucketMaxSize (in bytes) is the maximum allowed size per bucket. When // the size of the bucket reaches a threshold relative to this limit, it // gets sharded into the configured number of pieces incrementally. - BucketMaxSize int64 - - // The hash type to use at the base bucket level. Shards always use blake. - // For backwards compat. - BaseHashType HashType + BucketMaxSize int64 `json:"-"` } // StoragePacker packs many items into abstractions called buckets. The goal @@ -76,51 +62,106 @@ type StoragePackerV1 struct { *Config storageLocks []*locksutil.LockEntry bucketsCache *radix.Tree + + // Note that we're slightly loosy-goosy with this lock. 
The reason is that + // outside of an identity store upgrade case, only PutItem will ever write + // a bucket, and that will always fetch a lock on the bucket first. This + // will also cover the sharding case since you'd get the parent lock first. + // So we can get away with only locking just when modifying, because we + // should already be locked in terms of an entry overwriting itself. + bucketsCacheLock sync.RWMutex } // LockedBucket embeds a bucket and its corresponding lock to ensure thread // safety type LockedBucket struct { + sync.RWMutex *Bucket - lock sync.RWMutex } -// BucketPath returns the storage entry key for a given bucket key -func (s *StoragePackerV1) BucketPath(bucketKey string) string { - return s.ViewPrefix + bucketKey +func (s *StoragePackerV1) BucketsView() *logical.StorageView { + return s.BucketStorageView } -// BucketKeyHash returns the MD5 hash of the bucket storage key in which -// the item will be stored. The choice of MD5 is only for hash performance -// reasons since its value is not used for any security sensitive operation. -func (s *StoragePackerV1) BucketKeyHashByItemID(itemID string) string { - return s.BucketKeyHashByKey(s.BucketPath(s.BucketKey(itemID))) +func (s *StoragePackerV1) BucketStorageKeyForItemID(itemID string) string { + hexVal := hex.EncodeToString(cryptoutil.Blake2b256Hash(itemID)) + + s.bucketsCacheLock.RLock() + _, bucketRaw, found := s.bucketsCache.LongestPrefix(hexVal) + s.bucketsCacheLock.RUnlock() + + if found { + return bucketRaw.(*LockedBucket).Key + } + + // If we have existing buckets we'd have parsed them in on startup + // (assuming that all users load all entries on startup), so this is a + // fresh storagepacker, so we use the root bits to return a proper number + // of chars. But first do that, lock, and try again to ensure nothing + // changed without holding a lock. 
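(With the default BaseBucketBits of 8, BaseBucketBits/4 is 2, so the fallback key computed below is the first two hex characters of the item ID's Blake2b-256 hash, i.e. one of 256 possible base buckets from "00" to "ff"; deeper shard levels would presumably consume further hex characters of the same hash.)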
+ cacheKey := hexVal[0 : s.BaseBucketBits/4] + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.RLock() + + s.bucketsCacheLock.RLock() + _, bucketRaw, found = s.bucketsCache.LongestPrefix(hexVal) + s.bucketsCacheLock.RUnlock() + + lock.RUnlock() + + if found { + return bucketRaw.(*LockedBucket).Key + } + + return cacheKey } -// BucketKeyHashByKey returns the MD5 hash of the bucket storage key -func (s *StoragePackerV1) BucketKeyHashByKey(bucketKey string) string { - hf := md5.New() - hf.Write([]byte(bucketKey)) - return hex.EncodeToString(hf.Sum(nil)) +func (s *StoragePackerV1) BucketHashKeyForItemID(itemID string) string { + return GetCacheKey(s.BucketStorageKeyForItemID(itemID)) } -// View returns the storage view configured to be used by the packer -func (s *StoragePackerV1) StorageView() logical.Storage { - return s.View +func GetCacheKey(key string) string { + return strings.Replace(key, "/", "", -1) } // Get returns a bucket for a given key -func (s *StoragePackerV1) GetBucket(key string) (*Bucket, error) { +func (s *StoragePackerV1) GetBucket(key string) (*LockedBucket, error) { + cacheKey := GetCacheKey(key) + if key == "" { return nil, fmt.Errorf("missing bucket key") } - lock := locksutil.LockForKey(s.storageLocks, key) + lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.RLock() - defer lock.RUnlock() + + s.bucketsCacheLock.RLock() + _, bucketRaw, found := s.bucketsCache.LongestPrefix(cacheKey) + s.bucketsCacheLock.RUnlock() + + if found { + ret := bucketRaw.(*LockedBucket) + lock.RUnlock() + return ret, nil + } + + // Swap out for a write lock + lock.RUnlock() + lock.Lock() + defer lock.Unlock() + + // Check for it to have been added + s.bucketsCacheLock.RLock() + _, bucketRaw, found = s.bucketsCache.LongestPrefix(cacheKey) + s.bucketsCacheLock.RUnlock() + + if found { + ret := bucketRaw.(*LockedBucket) + return ret, nil + } // Read from the underlying view - storageEntry, err := s.View.Get(context.Background(), key) + storageEntry, err := s.BucketStorageView.Get(context.Background(), key) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) } @@ -128,6 +169,22 @@ func (s *StoragePackerV1) GetBucket(key string) (*Bucket, error) { return nil, nil } + bucket, err := s.DecodeBucket(storageEntry) + if err != nil { + return nil, err + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(cacheKey, bucket) + s.bucketsCacheLock.Unlock() + + return bucket, nil +} + +// NOTE: Don't put inserting into the cache here, as that will mess with +// upgrade cases for the identity store as we want to keep the bucket out of +// the cache until we actually re-store it. 
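The cache that note refers to is an armon/go-radix tree keyed by the slash-stripped bucket key, so resolving an item to its bucket is a longest-prefix match of the item's hex hash rather than an exact lookup; that is what would let a deeper shard transparently own a slice of its parent's key space. A small illustration with made-up keys (the shard entry is hypothetical, since sharding is not wired up in this commit):

    package main

    import (
        "fmt"

        radix "github.com/armon/go-radix"
    )

    func main() {
        cache := radix.New()
        cache.Insert("00", "base bucket packer/buckets/00")
        cache.Insert("00a", "shard packer/buckets/00/a") // hypothetical shard entry

        // Illustrative prefix of a Blake2b-256 hex digest of some item ID.
        itemHash := "00a7c3e9"
        if prefix, bucket, found := cache.LongestPrefix(itemHash); found {
            fmt.Printf("hash %s resolves via cache key %q to %v\n", itemHash, prefix, bucket)
        }
    }
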
+func (s *StoragePackerV1) DecodeBucket(storageEntry *logical.StorageEntry) (*LockedBucket, error) { uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) if err != nil { return nil, errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) @@ -142,7 +199,89 @@ func (s *StoragePackerV1) GetBucket(key string) (*Bucket, error) { return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) } - return &bucket, nil + lb := &LockedBucket{ + Bucket: &bucket, + } + lb.Key = storageEntry.Key + + return lb, nil +} + +// Put stores a packed bucket entry +func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { + if bucket == nil { + return fmt.Errorf("nil bucket entry") + } + + if bucket.Key == "" { + return fmt.Errorf("missing key") + } + + cacheKey := GetCacheKey(bucket.Key) + + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.Lock() + + bucket.Lock() + err := s.storeBucket(bucket) + bucket.Unlock() + + lock.Unlock() + + return err +} + +// storeBucket actually stores the bucket. It expects that it's already locked. +func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { + marshaledBucket, err := proto.Marshal(bucket.Bucket) + if err != nil { + return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) + } + + compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{ + Type: compressutil.CompressionTypeSnappy, + }) + if err != nil { + return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) + } + + // Store the compressed value + err = s.BucketStorageView.Put(context.Background(), &logical.StorageEntry{ + Key: bucket.Key, + Value: compressedBucket, + }) + if err != nil { + return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(GetCacheKey(bucket.Key), bucket) + s.bucketsCacheLock.Unlock() + + return nil +} + +// DeleteBucket deletes an entire bucket entry +func (s *StoragePackerV1) DeleteBucket(key string) error { + if key == "" { + return fmt.Errorf("missing key") + } + + cacheKey := GetCacheKey(key) + + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.Lock() + defer lock.Unlock() + + if err := s.BucketStorageView.Delete(context.Background(), key); err != nil { + return errwrap.Wrapf("failed to delete packed storage entry: {{err}}", err) + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Delete(cacheKey) + s.bucketsCacheLock.Unlock() + + return nil } // upsert either inserts a new item into the bucket or updates an existing one @@ -180,55 +319,54 @@ func (s *Bucket) upsert(item *Item) error { return nil } -// BucketIndex returns the bucket key index for a given storage key -func (s *StoragePackerV1) BucketIndex(key string) uint8 { - hf := md5.New() - hf.Write([]byte(key)) - return uint8(hf.Sum(nil)[0]) -} - -// BucketKey returns the bucket key for a given item ID -func (s *StoragePackerV1) BucketKey(itemID string) string { - return strconv.Itoa(int(s.BucketIndex(itemID))) -} - // DeleteItem removes the storage entry which the given key refers to from its // corresponding bucket. 
func (s *StoragePackerV1) DeleteItem(itemID string) error { - if itemID == "" { return fmt.Errorf("empty item ID") } + var err error + // Get the bucket key - bucketKey := s.BucketKey(itemID) + bucketKey := s.BucketStorageKeyForItemID(itemID) + cacheKey := GetCacheKey(bucketKey) - // Prepend the view prefix - bucketPath := s.BucketPath(bucketKey) + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.Lock() + defer lock.Unlock() - // Read from underlying view - storageEntry, err := s.View.Get(context.Background(), bucketPath) - if err != nil { - return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) - } - if storageEntry == nil { - return nil - } + var bucket *LockedBucket - uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) - if err != nil { - return errwrap.Wrapf("failed to decompress packed storage value: {{err}}", err) - } - if notCompressed { - uncompressedData = storageEntry.Value - } + s.bucketsCacheLock.RLock() + _, bucketRaw, found := s.bucketsCache.LongestPrefix(cacheKey) + s.bucketsCacheLock.RUnlock() - var bucket Bucket - err = proto.Unmarshal(uncompressedData, &bucket) - if err != nil { - return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) + if found { + bucket = bucketRaw.(*LockedBucket) + } else { + // Read from underlying view + storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) + if err != nil { + return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) + } + if storageEntry == nil { + return nil + } + + bucket, err = s.DecodeBucket(storageEntry) + if err != nil { + return errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(cacheKey, bucket) + s.bucketsCacheLock.Unlock() } + bucket.Lock() + defer bucket.Unlock() + // Look for a matching storage entry foundIdx := -1 for itemIdx, item := range bucket.Items { @@ -244,7 +382,7 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) // Persist bucket entry only if there is an update - err = s.PutBucket(&bucket) + err = s.storeBucket(bucket) if err != nil { return err } @@ -253,70 +391,59 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { return nil } -// Put stores a packed bucket entry -func (s *StoragePackerV1) PutBucket(bucket *Bucket) error { - if bucket == nil { - return fmt.Errorf("nil bucket entry") +// GetItem fetches the storage entry for a given key from its corresponding +// bucket. 
+func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { + if itemID == "" { + return nil, fmt.Errorf("empty item ID") } - if bucket.Key == "" { - return fmt.Errorf("missing key") - } + bucketKey := s.BucketStorageKeyForItemID(itemID) + cacheKey := GetCacheKey(bucketKey) - if !strings.HasPrefix(bucket.Key, s.ViewPrefix) { - return fmt.Errorf("incorrect prefix; bucket entry key should have %q prefix", s.ViewPrefix) - } + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.RLock() + defer lock.RUnlock() - marshaledBucket, err := proto.Marshal(bucket) - if err != nil { - return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) - } + var bucket *LockedBucket - compressedBucket, err := compressutil.Compress(marshaledBucket, &compressutil.CompressionConfig{ - Type: compressutil.CompressionTypeSnappy, - }) - if err != nil { - return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) - } + s.bucketsCacheLock.RLock() + _, bucketRaw, found := s.bucketsCache.LongestPrefix(cacheKey) + s.bucketsCacheLock.RUnlock() - // Store the compressed value - err = s.View.Put(context.Background(), &logical.StorageEntry{ - Key: bucket.Key, - Value: compressedBucket, - }) - if err != nil { - return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) - } + if found { + bucket = bucketRaw.(*LockedBucket) + } else { + // Read from underlying view + storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) + if err != nil { + return nil, errwrap.Wrapf("failed to read packed storage value: {{err}}", err) + } + if storageEntry == nil { + return nil, nil + } - return nil -} + bucket, err = s.DecodeBucket(storageEntry) + if err != nil { + return nil, errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) + } -// GetItem fetches the storage entry for a given key from its corresponding -// bucket. -func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { - if itemID == "" { - return nil, fmt.Errorf("empty item ID") + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(cacheKey, bucket) + s.bucketsCacheLock.Unlock() } - bucketKey := s.BucketKey(itemID) - bucketPath := s.BucketPath(bucketKey) - - // Fetch the bucket entry - bucket, err := s.GetBucket(bucketPath) - if err != nil { - return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) - } - if bucket == nil { - return nil, nil - } + bucket.RLock() // Look for a matching storage entry in the bucket items for _, item := range bucket.Items { if item.ID == itemID { + bucket.RUnlock() return item, nil } } + bucket.RUnlock() return nil, nil } @@ -330,78 +457,75 @@ func (s *StoragePackerV1) PutItem(item *Item) error { return fmt.Errorf("missing ID in item") } - var err error - bucketKey := s.BucketKey(item.ID) - bucketPath := s.BucketPath(bucketKey) - - bucket := &Bucket{ - Key: bucketPath, - } + // Get the bucket key + bucketKey := s.BucketStorageKeyForItemID(item.ID) + cacheKey := GetCacheKey(bucketKey) - // In this case, we persist the storage entry regardless of the read - // storageEntry below is nil or not. Hence, directly acquire write lock - // even to read the entry. 
- lock := locksutil.LockForKey(s.storageLocks, bucketPath) + lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() defer lock.Unlock() - // Check if there is an existing bucket for a given key - storageEntry, err := s.View.Get(context.Background(), bucketPath) - if err != nil { - return errwrap.Wrapf("failed to read packed storage bucket entry: {{err}}", err) - } + var bucket *LockedBucket - if storageEntry == nil { - // If the bucket entry does not exist, this will be the only item the - // bucket that is going to be persisted. - bucket.Items = []*Item{ - item, - } + s.bucketsCacheLock.RLock() + _, bucketRaw, found := s.bucketsCache.LongestPrefix(cacheKey) + s.bucketsCacheLock.RUnlock() + + if found { + bucket = bucketRaw.(*LockedBucket) } else { - uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) + // Read from underlying view + storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) if err != nil { - return errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) - } - if notCompressed { - uncompressedData = storageEntry.Value + return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } - err = proto.Unmarshal(uncompressedData, bucket) - if err != nil { - return errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) + if storageEntry == nil { + bucket = &LockedBucket{ + Bucket: &Bucket{ + Key: bucketKey, + }, + } + } else { + bucket, err = s.DecodeBucket(storageEntry) + if err != nil { + return errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(cacheKey, bucket) + s.bucketsCacheLock.Unlock() } + } - err = bucket.upsert(item) - if err != nil { - return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err) - } + bucket.Lock() + defer bucket.Unlock() + + if err := bucket.upsert(item); err != nil { + return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err) } // Persist the result - return s.PutBucket(bucket) + return s.storeBucket(bucket) } // NewStoragePackerV1 creates a new storage packer for a given view func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1, error) { - if config.View == nil { - return nil, fmt.Errorf("nil view") - } - - if config.ViewPrefix == "" { - config.ViewPrefix = DefaultStoragePackerBucketsPrefix + if config.BucketStorageView == nil { + return nil, fmt.Errorf("nil buckets view") } - if !strings.HasSuffix(config.ViewPrefix, "/") { - config.ViewPrefix = config.ViewPrefix + "/" + if config.ConfigStorageView == nil { + return nil, fmt.Errorf("nil config view") } - if config.BucketBaseCount == 0 { - config.BucketBaseCount = defaultBucketBaseCount + if config.BaseBucketBits == 0 { + config.BaseBucketBits = defaultBaseBucketBits } // At this point, look for an existing saved configuration var needPersist bool - entry, err := config.View.Get(ctx, config.ViewPrefix+"config") + entry, err := config.ConfigStorageView.Get(ctx, "config") if err != nil { return nil, errwrap.Wrapf("error checking for existing storagepacker config: {{err}}", err) } @@ -411,12 +535,9 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1, if err := entry.DecodeJSON(&exist); err != nil { return nil, errwrap.Wrapf("error decoding existing storagepacker config: {{err}}", err) } - // If we have an existing config, we copy the only two things we need - // constant: - // - // 1. 
The bucket base count, so we know how many to expect
-	// 2. The base hash type. We need to know how to hash at the base. All
-	//    shards will use Blake.
+	// If we have an existing config, we copy the only thing we need to
+	// keep constant: the base bucket bits, so we know how many buckets to
+	// expect at the base level
 	//
 	// The rest of the values can change; the max size can change based on
 	// e.g. if storage is migrated, so as long as we don't move to a new
@@ -426,36 +547,38 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1,
 	// it's sharded; if we realize it's more efficient to do some other
 	// value later we can update it and use that going forward for new
 	// shards.
-	config.BucketBaseCount = exist.BucketBaseCount
-	config.BaseHashType = exist.BaseHashType
+	config.BaseBucketBits = exist.BaseBucketBits
 	}
 
-	if config.BucketShardCount == 0 {
-		config.BucketShardCount = defaultBucketShardCount
+	if config.BucketShardBits == 0 {
+		config.BucketShardBits = defaultBucketShardBits
 	}
 
 	if config.BucketMaxSize == 0 {
 		config.BucketMaxSize = defaultBucketMaxSize
 	}
 
-	if !isPowerOfTwo(config.BucketBaseCount) {
-		return nil, fmt.Errorf("bucket base count of %d is not a power of two", config.BucketBaseCount)
+	if config.BaseBucketBits%4 != 0 {
+		return nil, fmt.Errorf("bucket base bits of %d is not a multiple of four", config.BaseBucketBits)
 	}
 
-	if !isPowerOfTwo(config.BucketShardCount) {
-		return nil, fmt.Errorf("bucket shard count of %d is not a power of two", config.BucketShardCount)
+	if config.BucketShardBits%4 != 0 {
+		return nil, fmt.Errorf("bucket shard bits of %d is not a multiple of four", config.BucketShardBits)
 	}
 
-	if config.BucketShardCount < 2 {
-		return nil, fmt.Errorf("bucket shard count should at least be 2")
+	if config.BaseBucketBits < 4 {
+		return nil, errors.New("bucket base bits should be at least 4")
+	}
+	if config.BucketShardBits < 4 {
+		return nil, errors.New("bucket shard bits should be at least 4")
 	}
 
 	if needPersist {
-		entry, err := logical.StorageEntryJSON(config.ViewPrefix+"config", config)
+		entry, err := logical.StorageEntryJSON("config", config)
 		if err != nil {
 			return nil, errwrap.Wrapf("error encoding storagepacker config: {{err}}", err)
 		}
-		if err := config.View.Put(ctx, entry); err != nil {
+		if err := config.ConfigStorageView.Put(ctx, entry); err != nil {
 			return nil, errwrap.Wrapf("error storing storagepacker config: {{err}}", err)
 		}
 	}
@@ -469,15 +592,3 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1,
 
 	return packer, nil
 }
-
-// isPowerOfTwo returns true if the given value is a power of two, false
-// otherwise. We also return false on 1 because there'd be no point.
-func isPowerOfTwo(val int) bool {
-	switch val {
-	case 0, 1:
-		return false
-	default:
-		return val&(val-1) == 0
-	}
-	return false
-}
diff --git a/vault/identity_store.go b/vault/identity_store.go
index 4f340420cdffe..68eb93de2636c 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -19,8 +19,8 @@ import (
 )
 
 const (
-	entityBucketsPrefix = "packer/buckets/"
-	groupBucketsPrefix  = "packer/group/buckets/"
+	entityStoragePackerPrefix = "packer/"
+	groupStoragePackerPrefix  = "packer/group/"
 )
 
 var (
@@ -63,34 +63,52 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo
 	groupsPackerLogger := iStore.logger.Named("storagepacker").Named("groups")
 	core.AddLogger(groupsPackerLogger)
 
-	// If we find buckets, we've already written values so we fall back to md5
-	// at the top level for compatibility. 
If we don't, this is a fresh install - // and we use Blake at the top level. - baseHashType := storagepacker.HashTypeBlake2b256 - vals, err := iStore.view.List(ctx, entityBucketsPrefix) + // Check for the upgrade case + entitiesBucketStorageView := logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"buckets/") + vals, err := entitiesBucketStorageView.List(ctx, "") if err != nil { return nil, err } + entityBucketsToUpgrade := make([]string, 0, 256) + for _, val := range vals { + if val == "v2/" { + continue + } + entityBucketsToUpgrade = append(entityBucketsToUpgrade, val) + } if len(vals) > 0 { - baseHashType = storagepacker.HashTypeMD5 + // TODO } iStore.entityPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - View: iStore.view, - ViewPrefix: entityBucketsPrefix, - Logger: entitiesPackerLogger, - BaseHashType: baseHashType, + BucketStorageView: logical.NewStorageView(entitiesBucketStorageView, "v2/"), + ConfigStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"config/"), + Logger: entitiesPackerLogger, }) if err != nil { return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } + groupsBucketStorageView := logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/") + vals, err = groupsBucketStorageView.List(ctx, "") + if err != nil { + return nil, err + } + + groupBucketsToUpgrade := make([]string, 0, 256) + for _, val := range vals { + if val == "v2/" { + continue + } + groupBucketsToUpgrade = append(groupBucketsToUpgrade, val) + } + if len(vals) > 0 { + // TODO + } iStore.groupPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - View: iStore.view, - ViewPrefix: groupBucketsPrefix, - Logger: groupsPackerLogger, - BaseHashType: baseHashType, + BucketStorageView: logical.NewStorageView(groupsBucketStorageView, "v2/"), + ConfigStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"config/"), + Logger: groupsPackerLogger, }) - if err != nil { return nil, errwrap.Wrapf("failed to create group packer: {{err}}", err) } @@ -132,13 +150,8 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { switch { // Check if the key is a storage entry key for an entity bucket - case strings.HasPrefix(key, storagepacker.DefaultStoragePackerBucketsPrefix): - // Get the hash value of the storage bucket entry key - bucketKeyHash := i.entityPacker.BucketKeyHashByKey(key) - if len(bucketKeyHash) == 0 { - i.logger.Error("failed to get the bucket entry key hash") - return - } + case strings.HasPrefix(key, i.entityPacker.BucketsView().Prefix()): + bucketKeyHash := storagepacker.GetCacheKey(strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) // Create a MemDB transaction txn := i.db.Txn(true) @@ -148,9 +161,9 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { // entry key of the entity bucket. Fetch all the entities that // belong to this bucket using the hash value. Remove these entities // from MemDB along with all the aliases of each entity. 
- entitiesFetched, err := i.MemDBEntitiesByBucketEntryKeyHashInTxn(txn, string(bucketKeyHash)) + entitiesFetched, err := i.MemDBEntitiesByBucketEntryKeyHashInTxn(txn, bucketKeyHash) if err != nil { - i.logger.Error("failed to fetch entities using the bucket entry key hash", "bucket_entry_key_hash", bucketKeyHash) + i.logger.Error("failed to fetch entities using bucket hash key", "bucket_entry_key_hash", bucketKeyHash) return } @@ -206,13 +219,8 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { return // Check if the key is a storage entry key for an group bucket - case strings.HasPrefix(key, groupBucketsPrefix): - // Get the hash value of the storage bucket entry key - bucketKeyHash := i.groupPacker.BucketKeyHashByKey(key) - if len(bucketKeyHash) == 0 { - i.logger.Error("failed to get the bucket entry key hash") - return - } + case strings.HasPrefix(key, i.groupPacker.BucketsView().Prefix()): + bucketKeyHash := storagepacker.GetCacheKey(strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) // Create a MemDB transaction txn := i.db.Txn(true) @@ -310,7 +318,6 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto entity.LastUpdateTime = oldEntity.LastUpdateTime entity.MergedEntityIDs = oldEntity.MergedEntityIDs entity.Policies = oldEntity.Policies - entity.BucketKeyHash = oldEntity.BucketKeyHash entity.MFASecrets = oldEntity.MFASecrets // Copy each alias individually since the format of aliases were // also different @@ -332,6 +339,8 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto persistNeeded = true } + entity.BucketKeyHash = i.entityPacker.BucketHashKeyForItemID(entity.ID) + pN, err := parseExtraEntityFromBucket(ctx, i, &entity) if err != nil { return nil, err @@ -380,6 +389,8 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide group.NamespaceID = namespace.RootNamespaceID } + group.BucketKeyHash = i.groupPacker.BucketHashKeyForItemID(group.ID) + return &group, nil } diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 9c29ee8eb58f1..0a07a5e86fa47 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -76,96 +76,149 @@ func (i *IdentityStore) sanitizeName(name string) string { func (i *IdentityStore) loadGroups(ctx context.Context) error { i.logger.Debug("identity loading groups") - allBuckets := make([]string, 0, 257) - - var walkPrefixes func(in string) error - walkPrefixes = func(in string) error { - existing, err := i.groupPacker.StorageView().List(ctx, in) - if err != nil { - return errwrap.Wrapf("failed to scan for groups: {{err}}", err) - } - for _, key := range existing { - if key == "config" { - continue - } - if key[len(key)-1] == '/' { - if err := walkPrefixes(key); err != nil { - return err - } - } else { - allBuckets = append(allBuckets, key) - } - } - return nil - } - if err := walkPrefixes(groupBucketsPrefix); err != nil { - return err + allBuckets, err := logical.CollectKeys(ctx, i.groupPacker.BucketsView()) + if err != nil { + return errwrap.Wrapf("failed to scan for group buckets: {{err}}", err) } i.logger.Debug("group buckets collected", "num_existing", len(allBuckets)) - for _, key := range allBuckets { - bucket, err := i.groupPacker.GetBucket(i.groupPacker.BucketPath(key)) - if err != nil { - return err - } - if bucket == nil { - continue - } + // Make the channels used for the worker pool + broker := make(chan string) + quit := make(chan bool) - for _, item := range bucket.Items { - group, 
err := i.parseGroupFromBucketItem(item) - if err != nil { - return err - } - if group == nil { - continue + // Buffer these channels to prevent deadlocks + errs := make(chan error, len(allBuckets)) + result := make(chan *storagepacker.LockedBucket, len(allBuckets)) + + // Use a wait group + wg := &sync.WaitGroup{} + + // Create 64 workers to distribute work to + for j := 0; j < consts.ExpirationRestoreWorkerCount; j++ { + wg.Add(1) + go func() { + defer wg.Done() + + for { + select { + case bucketKey, ok := <-broker: + // broker has been closed, we are done + if !ok { + return + } + + bucket, err := i.groupPacker.GetBucket(bucketKey) + if err != nil { + errs <- err + continue + } + + // Write results out to the result channel + result <- bucket + + // quit early + case <-quit: + return + } } + }() + } - // Ensure that there are no groups with duplicate names - groupByName, err := i.MemDBGroupByName(ctx, group.Name, false) - if err != nil { - return err + // Distribute the collected keys to the workers in a go routine + wg.Add(1) + go func() { + defer wg.Done() + for j, bucketKey := range allBuckets { + if j%500 == 0 { + i.logger.Debug("groups loading", "progress", j) } - if groupByName != nil { - i.logger.Warn(errDuplicateIdentityName.Error(), "group_name", group.Name, "conflicting_group_name", groupByName.Name, "action", "merge the contents of duplicated groups into one and delete the other") - if !i.disableLowerCasedNames { - return errDuplicateIdentityName - } + + select { + case <-quit: + return + + default: + broker <- bucketKey } + } + + // Close the broker, causing worker routines to exit + close(broker) + }() + + // Restore each key by pulling from the result chan + for j := 0; j < len(allBuckets); j++ { + select { + case err := <-errs: + // Close all go routines + close(quit) - if i.logger.IsDebug() { - i.logger.Debug("loading group", "name", group.Name, "id", group.ID) + return err + + case bucket := <-result: + // If there is no entry, nothing to restore + if bucket == nil { + continue } - txn := i.db.Txn(true) + for _, item := range bucket.Items { + group, err := i.parseGroupFromBucketItem(item) + if err != nil { + return err + } + if group == nil { + continue + } - // Before pull#5786, entity memberships in groups were not getting - // updated when respective entities were deleted. This is here to - // check that the entity IDs in the group are indeed valid, and if - // not remove them. 
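loadGroups here, and loadEntities further down, spread bucket reads across a fixed pool of workers. The following is a minimal, self-contained sketch of that fan-out/fan-in shape with the Vault-specific types replaced by strings; it assumes only the standard library sync package and mirrors the broker, quit, and buffered errs/result channel arrangement used in this patch.

// Sketch of the worker-pool pattern used by the bucket loaders above.
func fanOutFanIn(keys []string, workers int, fetch func(string) (string, error)) ([]string, error) {
	broker := make(chan string)
	quit := make(chan struct{})
	// Buffer errs and results so no goroutine blocks after an early exit.
	errs := make(chan error, len(keys))
	results := make(chan string, len(keys))

	var wg sync.WaitGroup

	// Workers pull keys from the broker until it is closed or quit fires.
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case key, ok := <-broker:
					if !ok {
						return
					}
					res, err := fetch(key)
					if err != nil {
						errs <- err
						continue
					}
					results <- res
				case <-quit:
					return
				}
			}
		}()
	}

	// Feeder distributes the keys, then closes the broker.
	wg.Add(1)
	go func() {
		defer wg.Done()
		defer close(broker)
		for _, key := range keys {
			select {
			case <-quit:
				return
			case broker <- key:
			}
		}
	}()

	// Collector: one receive per key, stopping everything on the first error.
	collected := make([]string, 0, len(keys))
	for i := 0; i < len(keys); i++ {
		select {
		case err := <-errs:
			close(quit)
			wg.Wait()
			return nil, err
		case res := <-results:
			collected = append(collected, res)
		}
	}

	wg.Wait()
	return collected, nil
}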
- persist := false - for _, memberEntityID := range group.MemberEntityIDs { - entity, err := i.MemDBEntityByID(memberEntityID, false) + // Ensure that there are no groups with duplicate names + groupByName, err := i.MemDBGroupByName(ctx, group.Name, false) if err != nil { return err } - if entity == nil { - persist = true - group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, memberEntityID) + if groupByName != nil { + i.logger.Warn(errDuplicateIdentityName.Error(), "group_name", group.Name, "conflicting_group_name", groupByName.Name, "action", "merge the contents of duplicated groups into one and delete the other") + if !i.disableLowerCasedNames { + return errDuplicateIdentityName + } } - } - err = i.UpsertGroupInTxn(txn, group, persist) - if err != nil { - txn.Abort() - return errwrap.Wrapf("failed to update group in memdb: {{err}}", err) - } + if i.logger.IsDebug() { + i.logger.Debug("loading group", "name", group.Name, "id", group.ID) + } - txn.Commit() + txn := i.db.Txn(true) + + // Before pull#5786, entity memberships in groups were not getting + // updated when respective entities were deleted. This is here to + // check that the entity IDs in the group are indeed valid, and if + // not remove them. + persist := false + for _, memberEntityID := range group.MemberEntityIDs { + entity, err := i.MemDBEntityByID(memberEntityID, false) + if err != nil { + return err + } + if entity == nil { + persist = true + group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, memberEntityID) + } + } + + err = i.UpsertGroupInTxn(txn, group, persist) + if err != nil { + txn.Abort() + return errwrap.Wrapf("failed to update group in memdb: {{err}}", err) + } + + txn.Commit() + } } } + // Let all go routines finish + wg.Wait() + if i.logger.IsInfo() { i.logger.Info("groups restored") } @@ -176,35 +229,9 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { func (i *IdentityStore) loadEntities(ctx context.Context) error { // Accumulate existing entities i.logger.Debug("loading entities") - existing, err := i.entityPacker.StorageView().List(ctx, storagepacker.DefaultStoragePackerBucketsPrefix) + allBuckets, err := logical.CollectKeys(ctx, i.entityPacker.BucketsView()) if err != nil { - return errwrap.Wrapf("failed to scan for entities: {{err}}", err) - } - - allBuckets := make([]string, 0, 257) - - var walkPrefixes func(in string) error - walkPrefixes = func(in string) error { - existing, err := i.entityPacker.StorageView().List(ctx, in) - if err != nil { - return errwrap.Wrapf("failed to scan for entities: {{err}}", err) - } - for _, key := range existing { - if key == "config" { - continue - } - if key[len(key)-1] == '/' { - if err := walkPrefixes(key); err != nil { - return err - } - } else { - allBuckets = append(allBuckets, key) - } - } - return nil - } - if err := walkPrefixes(entityBucketsPrefix); err != nil { - return err + return errwrap.Wrapf("failed to scan for entity buckets: {{err}}", err) } i.logger.Debug("entity buckets collected", "num_existing", len(allBuckets)) @@ -215,7 +242,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { // Buffer these channels to prevent deadlocks errs := make(chan error, len(allBuckets)) - result := make(chan *storagepacker.Bucket, len(allBuckets)) + result := make(chan *storagepacker.LockedBucket, len(allBuckets)) // Use a wait group wg := &sync.WaitGroup{} @@ -234,7 +261,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { return } - bucket, err := 
i.entityPacker.GetBucket(i.entityPacker.BucketPath(bucketKey)) + bucket, err := i.entityPacker.GetBucket(bucketKey) if err != nil { errs <- err continue @@ -255,7 +282,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { wg.Add(1) go func() { defer wg.Done() - for j, bucketKey := range existing { + for j, bucketKey := range allBuckets { if j%500 == 0 { i.logger.Debug("entities loading", "progress", j) } @@ -274,7 +301,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { }() // Restore each key by pulling from the result chan - for j := 0; j < len(existing); j++ { + for j := 0; j < len(allBuckets); j++ { select { case err := <-errs: // Close all go routines @@ -975,7 +1002,7 @@ func (i *IdentityStore) sanitizeEntity(ctx context.Context, entity *identity.Ent } // Set the hash value of the storage bucket key in entity - entity.BucketKeyHash = i.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = i.entityPacker.BucketHashKeyForItemID(entity.ID) } ns, err := namespace.FromContext(ctx) @@ -1035,7 +1062,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident } // Set the hash value of the storage bucket key in group - group.BucketKeyHash = i.groupPacker.BucketKeyHashByItemID(group.ID) + group.BucketKeyHash = i.groupPacker.BucketHashKeyForItemID(group.ID) } if group.NamespaceID == "" { From 6effe9e7afc86d89105d1c836d1b2079402fd0a9 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 16:30:51 -0500 Subject: [PATCH 10/38] Use ItemMap instead of Items --- helper/storagepacker/storagepacker_v1.go | 66 ++++++++-------------- helper/storagepacker/types.pb.go | 70 ++++++++++++------------ helper/storagepacker/types.proto | 10 ++-- vault/identity_store.go | 8 +-- vault/identity_store_util.go | 29 ++++++---- 5 files changed, 88 insertions(+), 95 deletions(-) diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index 3285d800caa82..57459cf1e79a0 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -10,6 +10,7 @@ import ( radix "github.com/armon/go-radix" "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/compressutil" @@ -299,23 +300,11 @@ func (s *Bucket) upsert(item *Item) error { return fmt.Errorf("missing item ID") } - // Look for an item with matching key and don't modify the collection - // while iterating - foundIdx := -1 - for itemIdx, bucketItems := range s.Items { - if bucketItems.ID == item.ID { - foundIdx = itemIdx - break - } - } - - // If there is no match, append the item, otherwise update it - if foundIdx == -1 { - s.Items = append(s.Items, item) - } else { - s.Items[foundIdx] = item + if s.ItemMap == nil { + s.ItemMap = make(map[string]*any.Any) } + s.ItemMap[item.ID] = item.Message return nil } @@ -326,8 +315,6 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { return fmt.Errorf("empty item ID") } - var err error - // Get the bucket key bucketKey := s.BucketStorageKeyForItemID(itemID) cacheKey := GetCacheKey(bucketKey) @@ -367,28 +354,17 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { bucket.Lock() defer bucket.Unlock() - // Look for a matching storage entry - foundIdx := -1 - for itemIdx, item := range bucket.Items { - if item.ID == itemID { - foundIdx = itemIdx - break - } + if len(bucket.ItemMap) == 0 { + return nil } - // If there is a 
match, remove it from the collection and persist the - // resulting collection - if foundIdx != -1 { - bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) - - // Persist bucket entry only if there is an update - err = s.storeBucket(bucket) - if err != nil { - return err - } + _, ok := bucket.ItemMap[itemID] + if !ok { + return nil } - return nil + delete(bucket.ItemMap, itemID) + return s.storeBucket(bucket) } // GetItem fetches the storage entry for a given key from its corresponding @@ -435,16 +411,22 @@ func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { bucket.RLock() - // Look for a matching storage entry in the bucket items - for _, item := range bucket.Items { - if item.ID == itemID { - bucket.RUnlock() - return item, nil - } + if len(bucket.ItemMap) == 0 { + bucket.RUnlock() + return nil, nil + } + + item, ok := bucket.ItemMap[itemID] + if !ok { + bucket.RUnlock() + return nil, nil } bucket.RUnlock() - return nil, nil + return &Item{ + ID: itemID, + Message: item, + }, nil } // PutItem stores a storage entry in its corresponding bucket diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 6e5554577a797..ef3974e1f3fc2 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -80,14 +80,14 @@ func (m *Item) GetMessage() *any.Any { type Bucket struct { // Key is the storage path where the bucket gets stored Key string `sentinel:"" protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - // Items holds the items contained within this bucket + // Items holds the items contained within this bucket. Used by v1. Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` + // ItemMap stores a mapping of item ID to message. Used by v2. 
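ItemMap replaces the v1 Items slice as the container for a bucket's contents. A minimal sketch of folding the old slice into the new map follows, roughly what later patches in this series do inline during upgrade; the itemsToItemMap name and the explicit nil-map guard are additions of this sketch rather than part of the generated code.

// Illustrative helper: fold a v1 Items slice into the v2 ItemMap form.
func itemsToItemMap(b *Bucket) {
	if b.ItemMap == nil {
		b.ItemMap = make(map[string]*any.Any, len(b.Items))
	}
	for _, item := range b.Items {
		b.ItemMap[item.ID] = item.Message
	}
	// Optionally drop the old slice once the map is populated.
	b.Items = nil
}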
+ ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Buckets are the buckets contained within this bucket - Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Sharded indicates if the contained buckets are pushed out or not - Sharded bool `sentinel:"" protobuf:"varint,4,opt,name=sharded,proto3" json:"sharded,omitempty"` - // The number of shards created in this bucket - ShardCount uint32 `sentinel:"" protobuf:"varint,5,opt,name=shard_count,json=shardCount,proto3" json:"shard_count,omitempty"` + Sharded bool `sentinel:"" protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"` // Size of this bucket in number of bytes Size uint32 `sentinel:"" protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -134,6 +134,13 @@ func (m *Bucket) GetItems() []*Item { return nil } +func (m *Bucket) GetItemMap() map[string]*any.Any { + if m != nil { + return m.ItemMap + } + return nil +} + func (m *Bucket) GetBuckets() map[string]*Bucket { if m != nil { return m.Buckets @@ -148,13 +155,6 @@ func (m *Bucket) GetSharded() bool { return false } -func (m *Bucket) GetShardCount() uint32 { - if m != nil { - return m.ShardCount - } - return 0 -} - func (m *Bucket) GetSize() uint32 { if m != nil { return m.Size @@ -166,31 +166,33 @@ func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") proto.RegisterMapType((map[string]*Bucket)(nil), "storagepacker.Bucket.BucketsEntry") + proto.RegisterMapType((map[string]*any.Any)(nil), "storagepacker.Bucket.ItemMapEntry") } func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } var fileDescriptor_c0e98c66c4f51b7f = []byte{ - // 321 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xf4, 0x30, - 0x10, 0xc6, 0x69, 0xf7, 0xdf, 0xfb, 0xce, 0xba, 0x22, 0x51, 0x21, 0xee, 0xc5, 0xb2, 0xa7, 0x8a, - 0x90, 0xe0, 0x7a, 0x11, 0xf1, 0xe2, 0x8a, 0x82, 0x47, 0x73, 0xf4, 0x22, 0x69, 0x3b, 0xb6, 0x65, - 0xdb, 0xa6, 0x24, 0xe9, 0x42, 0xfd, 0x3e, 0x7e, 0x4f, 0xd9, 0x74, 0x0b, 0xae, 0xec, 0xa9, 0xd3, - 0x79, 0x7e, 0x79, 0xe6, 0xc9, 0x04, 0x82, 0x0c, 0x8b, 0x1a, 0x35, 0x37, 0x56, 0x69, 0x99, 0x62, - 0x2d, 0xe3, 0x35, 0x6a, 0x6e, 0xdb, 0x1a, 0x0d, 0xab, 0xb5, 0xb2, 0x8a, 0xcc, 0xf6, 0xa4, 0xf9, - 0x45, 0xaa, 0x54, 0x5a, 0x20, 0x77, 0x62, 0xd4, 0x7c, 0x72, 0x59, 0xb5, 0x1d, 0xb9, 0x78, 0x81, - 0xe1, 0xab, 0xc5, 0x92, 0x1c, 0x83, 0x9f, 0x27, 0xd4, 0x0b, 0xbc, 0xf0, 0xbf, 0xf0, 0xf3, 0x84, - 0x30, 0x98, 0x94, 0x68, 0x8c, 0x4c, 0x91, 0xfa, 0x81, 0x17, 0x4e, 0x97, 0x67, 0xac, 0x33, 0x61, - 0xbd, 0x09, 0x7b, 0xac, 0x5a, 0xd1, 0x43, 0x8b, 0x6f, 0x1f, 0xc6, 0xab, 0x26, 0x5e, 0xa3, 0x25, - 0x27, 0x30, 0x58, 0x63, 0xbb, 0xf3, 0xda, 0x96, 0xe4, 0x0a, 0x46, 0xb9, 0xc5, 0xd2, 0x50, 0x3f, - 0x18, 0x84, 0xd3, 0xe5, 0x29, 0xdb, 0x8b, 0xc7, 0xb6, 0x01, 0x44, 0x47, 0x90, 0x07, 0x98, 0x44, - 0xce, 0xc6, 0xd0, 0x81, 0x83, 0x17, 0x7f, 0xe0, 0x6e, 0xc8, 0xee, 
0x63, 0x9e, 0x2b, 0xab, 0x5b, - 0xd1, 0x1f, 0x21, 0x14, 0x26, 0x26, 0x93, 0x3a, 0xc1, 0x84, 0x0e, 0x03, 0x2f, 0xfc, 0x27, 0xfa, - 0x5f, 0x72, 0x09, 0x53, 0x57, 0x7e, 0xc4, 0xaa, 0xa9, 0x2c, 0x1d, 0x05, 0x5e, 0x38, 0x13, 0xe0, - 0x5a, 0x4f, 0xdb, 0x0e, 0x21, 0x30, 0x34, 0xf9, 0x17, 0xd2, 0xb1, 0x53, 0x5c, 0x3d, 0x7f, 0x83, - 0xa3, 0xdf, 0x73, 0x0e, 0xdc, 0xec, 0x1a, 0x46, 0x1b, 0x59, 0x34, 0xfd, 0x92, 0xce, 0x0f, 0x86, - 0x15, 0x1d, 0x73, 0xef, 0xdf, 0x79, 0xab, 0x9b, 0x77, 0x9e, 0xe6, 0x36, 0x6b, 0x22, 0x16, 0xab, - 0x92, 0x67, 0xd2, 0x64, 0x79, 0xac, 0x74, 0xcd, 0x37, 0xb2, 0x29, 0x2c, 0x3f, 0xf4, 0xb0, 0xd1, - 0xd8, 0x6d, 0xfc, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x0f, 0xb0, 0x6d, 0x06, 0xf7, 0x01, 0x00, - 0x00, + // 341 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 0xeb, 0x40, + 0x10, 0xc6, 0x49, 0xd2, 0x36, 0x7d, 0xd3, 0xf6, 0xf1, 0xd8, 0xa7, 0xb0, 0xf6, 0x14, 0x7a, 0x8a, + 0x0a, 0xbb, 0x58, 0x2f, 0x22, 0x7a, 0xb0, 0xa0, 0xe0, 0x41, 0xd0, 0x1c, 0xbd, 0xc8, 0x26, 0x19, + 0x93, 0xd0, 0xa4, 0x09, 0xbb, 0x9b, 0x42, 0xfc, 0x3f, 0xfc, 0x7f, 0xa5, 0xd9, 0x16, 0x5a, 0x09, + 0x3d, 0xed, 0x0c, 0xf3, 0x7d, 0xbf, 0xfd, 0x66, 0x59, 0xf0, 0x52, 0xcc, 0x2b, 0x94, 0x5c, 0xe9, + 0x52, 0x8a, 0x04, 0x2b, 0x11, 0x2d, 0x51, 0x72, 0xdd, 0x54, 0xa8, 0x58, 0x25, 0x4b, 0x5d, 0x92, + 0xc9, 0xc1, 0x68, 0x7a, 0x96, 0x94, 0x65, 0x92, 0x23, 0x6f, 0x87, 0x61, 0xfd, 0xc9, 0xc5, 0xaa, + 0x31, 0xca, 0xd9, 0x13, 0xf4, 0x9e, 0x35, 0x16, 0xe4, 0x2f, 0xd8, 0x59, 0x4c, 0x2d, 0xcf, 0xf2, + 0xff, 0x04, 0x76, 0x16, 0x13, 0x06, 0x6e, 0x81, 0x4a, 0x89, 0x04, 0xa9, 0xed, 0x59, 0xfe, 0x68, + 0x7e, 0xc2, 0x0c, 0x84, 0xed, 0x20, 0xec, 0x61, 0xd5, 0x04, 0x3b, 0xd1, 0xec, 0xdb, 0x81, 0xc1, + 0xa2, 0x8e, 0x96, 0xa8, 0xc9, 0x3f, 0x70, 0x96, 0xd8, 0x6c, 0x59, 0x9b, 0x92, 0x9c, 0x43, 0x3f, + 0xd3, 0x58, 0x28, 0x6a, 0x7b, 0x8e, 0x3f, 0x9a, 0xff, 0x67, 0x07, 0xf1, 0xd8, 0x26, 0x40, 0x60, + 0x14, 0xe4, 0x1e, 0x86, 0x9b, 0xe2, 0xa3, 0x10, 0x15, 0x75, 0x5a, 0xf5, 0xec, 0x97, 0xda, 0xdc, + 0xd2, 0x9a, 0x5e, 0x44, 0xf5, 0xb8, 0xd2, 0xb2, 0x09, 0xdc, 0xcc, 0x74, 0xe4, 0x0e, 0xdc, 0xb0, + 0x9d, 0x2b, 0xda, 0x3b, 0xe6, 0x36, 0x87, 0xda, 0xba, 0xb7, 0x16, 0x42, 0xc1, 0x55, 0xa9, 0x90, + 0x31, 0xc6, 0xb4, 0xef, 0x59, 0xfe, 0x30, 0xd8, 0xb5, 0x84, 0x40, 0x4f, 0x65, 0x5f, 0x48, 0x07, + 0x9e, 0xe5, 0x4f, 0x82, 0xb6, 0x9e, 0xbe, 0xc2, 0x78, 0x3f, 0x44, 0xc7, 0xde, 0x17, 0xd0, 0x5f, + 0x8b, 0xbc, 0x3e, 0xfe, 0x84, 0x46, 0x72, 0x6b, 0xdf, 0x58, 0xd3, 0x37, 0x18, 0xef, 0x07, 0xeb, + 0x20, 0x5e, 0x1e, 0x12, 0x4f, 0x3b, 0xb7, 0xdb, 0x43, 0x2e, 0xae, 0xde, 0x79, 0x92, 0xe9, 0xb4, + 0x0e, 0x59, 0x54, 0x16, 0x3c, 0x15, 0x2a, 0xcd, 0xa2, 0x52, 0x56, 0x7c, 0x2d, 0xea, 0x5c, 0xf3, + 0xae, 0x8f, 0x14, 0x0e, 0xda, 0x78, 0xd7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x29, 0x1c, + 0x91, 0x67, 0x02, 0x00, 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 0683210e31fce..48b5afd211ac1 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -23,14 +23,14 @@ message Item { message Bucket { // Key is the storage path where the bucket gets stored string key = 1; - // Items holds the items contained within this bucket + // Items holds the items contained within this bucket. Used by v1. repeated Item items = 2; + // ItemMap stores a mapping of item ID to message. Used by v2. 
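Since ItemMap values are google.protobuf.Any, a caller wraps its concrete message before storing an item and unwraps it after fetching one. The fragment below is an illustrative usage sketch only; packer, entity, and identity.Entity stand in for whatever the caller actually uses, and it is assumed to run inside a function that returns an error.

// Illustrative usage only: pack a concrete proto message into an Item and
// recover it later via ptypes.
entityAny, err := ptypes.MarshalAny(entity)
if err != nil {
	return err
}
if err := packer.PutItem(&storagepacker.Item{ID: entity.ID, Message: entityAny}); err != nil {
	return err
}

item, err := packer.GetItem(entity.ID)
if err != nil || item == nil {
	return err
}

var restored identity.Entity
if err := ptypes.UnmarshalAny(item.Message, &restored); err != nil {
	return err
}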
+  map<string, google.protobuf.Any> item_map = 3;
   // Buckets are the buckets contained within this bucket
-  map<string, Bucket> buckets = 3;
+  map<string, Bucket> buckets = 4;
   // Sharded indicates if the contained buckets are pushed out or not
-  bool sharded = 4;
-  // The number of shards created in this bucket
-  uint32 shard_count = 5;
+  bool sharded = 5;
   // Size of this bucket in number of bytes
   uint32 size = 6;
 }
diff --git a/vault/identity_store.go b/vault/identity_store.go
index 68eb93de2636c..7b5bda3cb7eda 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -199,8 +199,8 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
 		// case, entities in the updated bucket needs to be reinserted into
 		// MemDB.
 		if bucket != nil {
-			for _, item := range bucket.Items {
-				entity, err := i.parseEntityFromBucketItem(ctx, item)
+			for id, message := range bucket.ItemMap {
+				entity, err := i.parseEntityFromBucketItem(ctx, &storagepacker.Item{ID: id, Message: message})
 				if err != nil {
 					i.logger.Error("failed to parse entity from bucket entry item", "error", err)
 					return
@@ -249,8 +249,8 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) {
 		}
 
 		if bucket != nil {
-			for _, item := range bucket.Items {
-				group, err := i.parseGroupFromBucketItem(item)
+			for id, message := range bucket.ItemMap {
+				group, err := i.parseGroupFromBucketItem(&storagepacker.Item{ID: id, Message: message})
 				if err != nil {
 					i.logger.Error("failed to parse group from bucket entry item", "error", err)
 					return
diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go
index 0a07a5e86fa47..8968f8ea1e3a0 100644
--- a/vault/identity_store_util.go
+++ b/vault/identity_store_util.go
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"strings"
 	"sync"
+	"sync/atomic"
 
 	"github.com/golang/protobuf/ptypes"
 	"github.com/hashicorp/errwrap"
@@ -94,6 +95,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 	// Use a wait group
 	wg := &sync.WaitGroup{}
 
+	var restoreCount uint64
+
 	// Create 64 workers to distribute work to
 	for j := 0; j < consts.ExpirationRestoreWorkerCount; j++ {
 		wg.Add(1)
@@ -130,8 +133,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 	go func() {
 		defer wg.Done()
 		for j, bucketKey := range allBuckets {
-			if j%500 == 0 {
-				i.logger.Debug("groups loading", "progress", j)
+			if j%50 == 0 {
+				i.logger.Debug("groups buckets loading", "progress", j)
 			}
 
 			select {
@@ -162,8 +165,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 				continue
 			}
 
-			for _, item := range bucket.Items {
-				group, err := i.parseGroupFromBucketItem(item)
+			for id, message := range bucket.ItemMap {
+				group, err := i.parseGroupFromBucketItem(&storagepacker.Item{ID: id, Message: message})
 				if err != nil {
 					return err
 				}
@@ -212,6 +215,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 				}
 
 				txn.Commit()
+
+				atomic.AddUint64(&restoreCount, 1)
 			}
 		}
 	}
@@ -220,7 +225,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 	wg.Wait()
 
 	if i.logger.IsInfo() {
-		i.logger.Info("groups restored")
+		i.logger.Info("groups restored", "num_restored", atomic.LoadUint64(&restoreCount))
 	}
 
 	return nil
@@ -283,8 +288,8 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error {
 	go func() {
 		defer wg.Done()
 		for j, bucketKey := range allBuckets {
-			if j%500 == 0 {
-				i.logger.Debug("entities loading", "progress", j)
+			if j%50 == 0 {
+				i.logger.Debug("entities buckets loading", "progress", j)
 			}
 
 			select {
@@ -300,6 +305,8 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error {
 		close(broker)
 	}()
 
+	var restoreCount uint64
+
+	// 
Restore each key by pulling from the result chan for j := 0; j < len(allBuckets); j++ { select { @@ -315,8 +322,8 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { continue } - for _, item := range bucket.Items { - entity, err := i.parseEntityFromBucketItem(ctx, item) + for id, message := range bucket.ItemMap { + entity, err := i.parseEntityFromBucketItem(ctx, &storagepacker.Item{ID: id, Message: message}) if err != nil { return err } @@ -342,6 +349,8 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { if err != nil { return errwrap.Wrapf("failed to update entity in MemDB: {{err}}", err) } + + atomic.AddUint64(&restoreCount, 1) } } } @@ -350,7 +359,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { wg.Wait() if i.logger.IsInfo() { - i.logger.Info("entities restored") + i.logger.Info("entities restored", "num_restored", atomic.LoadUint64(&restoreCount)) } return nil From 60a0f1358f7cf60fc4aba9bac76837dba40f8f67 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 16:52:54 -0500 Subject: [PATCH 11/38] Fix some bugs and tests --- helper/cryptoutil/cryptoutil_test.go | 5 +- helper/storagepacker/storagepacker_v1_test.go | 50 +++++++++---------- vault/identity_store_aliases_test.go | 2 +- vault/identity_store_entities_test.go | 4 +- vault/identity_store_group_aliases_test.go | 2 +- vault/identity_store_groups_test.go | 4 +- vault/identity_store_test.go | 4 +- 7 files changed, 32 insertions(+), 39 deletions(-) diff --git a/helper/cryptoutil/cryptoutil_test.go b/helper/cryptoutil/cryptoutil_test.go index f08ca397a101b..a277e4fcee400 100644 --- a/helper/cryptoutil/cryptoutil_test.go +++ b/helper/cryptoutil/cryptoutil_test.go @@ -3,10 +3,7 @@ package cryptoutil import "testing" func TestBlake2b256Hash(t *testing.T) { - hashVal, err = Blake2b256Hash("sampletext") - if err != nil { - t.Fatal(err) - } + hashVal := Blake2b256Hash("sampletext") if string(hashVal) == "" || string(hashVal) == "sampletext" { t.Fatalf("failed to hash the text") diff --git a/helper/storagepacker/storagepacker_v1_test.go b/helper/storagepacker/storagepacker_v1_test.go index 560c729a4dfaf..b9bde4fcfe8a1 100644 --- a/helper/storagepacker/storagepacker_v1_test.go +++ b/helper/storagepacker/storagepacker_v1_test.go @@ -1,9 +1,11 @@ package storagepacker import ( - "reflect" + "context" "testing" + "github.com/go-test/deep" + "github.com/gogo/protobuf/proto" "github.com/golang/protobuf/ptypes" log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" @@ -11,15 +13,22 @@ import ( "github.com/hashicorp/vault/logical" ) -func BenchmarkStoragePackerV1(b *testing.B) { - storagePacker, err := NewStoragePackerV1( - &logical.InmemStorage{}, - log.New(&log.LoggerOptions{Name: "storagepackertest"}), - "", - ) +func getStoragePacker(tb testing.TB) *StoragePackerV1 { + storage := &logical.InmemStorage{} + storageView := logical.NewStorageView(storage, "packer/buckets/v2") + storagePacker, err := NewStoragePackerV1(context.Background(), &Config{ + BucketStorageView: storageView, + ConfigStorageView: logical.NewStorageView(storage, "packer/config"), + Logger: log.New(&log.LoggerOptions{Name: "storagepackertest"}), + }) if err != nil { - b.Fatal(err) + tb.Fatal(err) } + return storagePacker +} + +func BenchmarkStoragePackerV1(b *testing.B) { + storagePacker := getStoragePacker(b) for i := 0; i < b.N; i++ { itemID, err := uuid.GenerateUUID() @@ -65,21 +74,14 @@ func BenchmarkStoragePackerV1(b *testing.B) { } func TestStoragePackerV1(t *testing.T) { - 
storagePacker, err := NewStoragePackerV1( - &logical.InmemStorage{}, - log.New(&log.LoggerOptions{Name: "storagepackertest"}), - "", - ) - if err != nil { - t.Fatal(err) - } + storagePacker := getStoragePacker(t) // Persist a storage entry item1 := &Item{ ID: "item1", } - err = storagePacker.PutItem(item1) + err := storagePacker.PutItem(item1) if err != nil { t.Fatal(err) } @@ -115,14 +117,7 @@ func TestStoragePackerV1(t *testing.T) { } func TestStoragePackerV1_SerializeDeserializeComplexItem_Version1(t *testing.T) { - storagePacker, err := NewStoragePackerV1( - &logical.InmemStorage{}, - log.New(&log.LoggerOptions{Name: "storagepackertest"}), - "", - ) - if err != nil { - t.Fatal(err) - } + storagePacker := getStoragePacker(t) timeNow := ptypes.TimestampNow() @@ -178,7 +173,8 @@ func TestStoragePackerV1_SerializeDeserializeComplexItem_Version1(t *testing.T) t.Fatal(err) } - if !reflect.DeepEqual(&itemDecoded, entity) { - t.Fatalf("bad: expected: %#v\nactual: %#v\n", entity, itemDecoded) + if !proto.Equal(&itemDecoded, entity) { + diff := deep.Equal(&itemDecoded, entity) + t.Fatal(diff) } } diff --git a/vault/identity_store_aliases_test.go b/vault/identity_store_aliases_test.go index 2e1f9cf860d4a..fa5384277a179 100644 --- a/vault/identity_store_aliases_test.go +++ b/vault/identity_store_aliases_test.go @@ -220,7 +220,7 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { Name: "testentityname", } - entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() diff --git a/vault/identity_store_entities_test.go b/vault/identity_store_entities_test.go index f23ad7ead7a62..3ae8b7d7fe54b 100644 --- a/vault/identity_store_entities_test.go +++ b/vault/identity_store_entities_test.go @@ -479,7 +479,7 @@ func TestIdentityStore_MemDBImmutability(t *testing.T) { }, } - entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() @@ -733,7 +733,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { }, } - entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() diff --git a/vault/identity_store_group_aliases_test.go b/vault/identity_store_group_aliases_test.go index ca1ca3c465324..7a0f5e57319a1 100644 --- a/vault/identity_store_group_aliases_test.go +++ b/vault/identity_store_group_aliases_test.go @@ -358,7 +358,7 @@ func TestIdentityStore_GroupAliases_MemDBIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid1", "testparentgroupid2"}, MemberEntityIDs: []string{"testentityid1", "testentityid2"}, Policies: []string{"testpolicy1", "testpolicy2"}, - BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid"), + BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid"), } txn := i.db.Txn(true) diff --git a/vault/identity_store_groups_test.go b/vault/identity_store_groups_test.go index d3043bf8a29f1..ee97e6093cc07 100644 --- a/vault/identity_store_groups_test.go +++ b/vault/identity_store_groups_test.go @@ -436,7 +436,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid1", "testparentgroupid2"}, MemberEntityIDs: []string{"testentityid1", "testentityid2"}, Policies: []string{"testpolicy1", "testpolicy2"}, - BucketKeyHash: 
i.groupPacker.BucketKeyHashByItemID("testgroupid"), + BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid"), } // Insert it into memdb @@ -459,7 +459,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid2", "testparentgroupid3"}, MemberEntityIDs: []string{"testentityid2", "testentityid3"}, Policies: []string{"testpolicy2", "testpolicy3"}, - BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid2"), + BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid2"), } // Insert it into memdb diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index ae8a23f86a1ad..f0bc76e602817 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -410,7 +410,7 @@ func TestIdentityStore_MergeConflictingAliases(t *testing.T) { alias, }, } - entity.BucketKeyHash = c.identityStore.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = c.identityStore.entityPacker.BucketHashKeyForItemID(entity.ID) // Now add the alias to two entities, skipping all existing checking by // writing directly entityAny, err := ptypes.MarshalAny(entity) @@ -430,7 +430,7 @@ func TestIdentityStore_MergeConflictingAliases(t *testing.T) { entity.Policies = []string{"bar", "baz"} alias.ID = "alias2" alias.CanonicalID = "entity2" - entity.BucketKeyHash = c.identityStore.entityPacker.BucketKeyHashByItemID(entity.ID) + entity.BucketKeyHash = c.identityStore.entityPacker.BucketHashKeyForItemID(entity.ID) entityAny, err = ptypes.MarshalAny(entity) if err != nil { t.Fatal(err) From 49276fc0f69b06cf2b29fd481687fccf20032825 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 17:03:29 -0500 Subject: [PATCH 12/38] Switch locksutil to blake --- helper/locksutil/locks.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/helper/locksutil/locks.go b/helper/locksutil/locks.go index e0c2fcdd8b58c..2ec4cf4e24c7b 100644 --- a/helper/locksutil/locks.go +++ b/helper/locksutil/locks.go @@ -1,8 +1,9 @@ package locksutil import ( - "crypto/md5" "sync" + + "github.com/hashicorp/vault/helper/cryptoutil" ) const ( @@ -34,9 +35,7 @@ func CreateLocks() []*LockEntry { } func LockIndexForKey(key string) uint8 { - hf := md5.New() - hf.Write([]byte(key)) - return uint8(hf.Sum(nil)[0]) + return uint8(cryptoutil.Blake2b256Hash(key)[0]) } func LockForKey(locks []*LockEntry, key string) *LockEntry { From 50db521841440bd3dc5cd8b603d00116a0bba4d3 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 20:38:36 -0500 Subject: [PATCH 13/38] Add queue funcs and rename something back --- helper/storagepacker/storagepacker_v1.go | 50 +++++++++++++++++----- vault/identity_store.go | 4 +- vault/identity_store_aliases_test.go | 2 +- vault/identity_store_entities_test.go | 4 +- vault/identity_store_group_aliases_test.go | 2 +- vault/identity_store_groups_test.go | 4 +- vault/identity_store_test.go | 4 +- vault/identity_store_util.go | 4 +- 8 files changed, 52 insertions(+), 22 deletions(-) diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index 57459cf1e79a0..ea3f1a75c1691 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -7,12 +7,14 @@ import ( "fmt" "strings" "sync" + "sync/atomic" radix "github.com/armon/go-radix" "github.com/golang/protobuf/proto" any "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" + multierror 
"github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/compressutil" "github.com/hashicorp/vault/helper/cryptoutil" "github.com/hashicorp/vault/helper/locksutil" @@ -71,6 +73,9 @@ type StoragePackerV1 struct { // So we can get away with only locking just when modifying, because we // should already be locked in terms of an entry overwriting itself. bucketsCacheLock sync.RWMutex + + queueMode uint32 + queuedBuckets sync.Map } // LockedBucket embeds a bucket and its corresponding lock to ensure thread @@ -117,7 +122,7 @@ func (s *StoragePackerV1) BucketStorageKeyForItemID(itemID string) string { return cacheKey } -func (s *StoragePackerV1) BucketHashKeyForItemID(itemID string) string { +func (s *StoragePackerV1) BucketKeyHashByItemID(itemID string) string { return GetCacheKey(s.BucketStorageKeyForItemID(itemID)) } @@ -222,18 +227,21 @@ func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() + defer lock.Unlock() bucket.Lock() - err := s.storeBucket(bucket) - bucket.Unlock() - - lock.Unlock() + defer bucket.Unlock() - return err + return s.storeBucket(bucket, false) } // storeBucket actually stores the bucket. It expects that it's already locked. -func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { +func (s *StoragePackerV1) storeBucket(bucket *LockedBucket, flushMode bool) error { + if !flushMode && atomic.LoadUint32(&s.queueMode) == 1 { + s.queuedBuckets.Store(bucket.Key, bucket) + return nil + } + marshaledBucket, err := proto.Marshal(bucket.Bucket) if err != nil { return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) @@ -287,7 +295,7 @@ func (s *StoragePackerV1) DeleteBucket(key string) error { // upsert either inserts a new item into the bucket or updates an existing one // if an item with a matching key is already present. 
-func (s *Bucket) upsert(item *Item) error { +func (s *LockedBucket) upsert(item *Item) error { if s == nil { return fmt.Errorf("nil storage bucket") } @@ -364,7 +372,7 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { } delete(bucket.ItemMap, itemID) - return s.storeBucket(bucket) + return s.storeBucket(bucket, false) } // GetItem fetches the storage entry for a given key from its corresponding @@ -488,7 +496,7 @@ func (s *StoragePackerV1) PutItem(item *Item) error { } // Persist the result - return s.storeBucket(bucket) + return s.storeBucket(bucket, false) } // NewStoragePackerV1 creates a new storage packer for a given view @@ -574,3 +582,25 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1, return packer, nil } + +func (s *StoragePackerV1) SetQueueMode(enabled bool) { + if enabled { + atomic.StoreUint32(&s.queueMode, 1) + } else { + atomic.StoreUint32(&s.queueMode, 0) + } +} + +func (s *StoragePackerV1) FlushQueue() error { + var err *multierror.Error + s.queuedBuckets.Range(func(key, value interface{}) bool { + lErr := s.storeBucket(value.(*LockedBucket), true) + if lErr != nil { + err = multierror.Append(err, lErr) + } + s.queuedBuckets.Delete(key) + return true + }) + + return err.ErrorOrNil() +} diff --git a/vault/identity_store.go b/vault/identity_store.go index 7b5bda3cb7eda..62a995dc781af 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -339,7 +339,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto persistNeeded = true } - entity.BucketKeyHash = i.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = i.entityPacker.BucketKeyHashByItemID(entity.ID) pN, err := parseExtraEntityFromBucket(ctx, i, &entity) if err != nil { @@ -389,7 +389,7 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide group.NamespaceID = namespace.RootNamespaceID } - group.BucketKeyHash = i.groupPacker.BucketHashKeyForItemID(group.ID) + group.BucketKeyHash = i.groupPacker.BucketKeyHashByItemID(group.ID) return &group, nil } diff --git a/vault/identity_store_aliases_test.go b/vault/identity_store_aliases_test.go index fa5384277a179..2e1f9cf860d4a 100644 --- a/vault/identity_store_aliases_test.go +++ b/vault/identity_store_aliases_test.go @@ -220,7 +220,7 @@ func TestIdentityStore_MemDBAliasIndexes(t *testing.T) { Name: "testentityname", } - entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() diff --git a/vault/identity_store_entities_test.go b/vault/identity_store_entities_test.go index 3ae8b7d7fe54b..f23ad7ead7a62 100644 --- a/vault/identity_store_entities_test.go +++ b/vault/identity_store_entities_test.go @@ -479,7 +479,7 @@ func TestIdentityStore_MemDBImmutability(t *testing.T) { }, } - entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() @@ -733,7 +733,7 @@ func TestIdentityStore_MemDBEntityIndexes(t *testing.T) { }, } - entity.BucketKeyHash = is.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = is.entityPacker.BucketKeyHashByItemID(entity.ID) txn := is.db.Txn(true) defer txn.Abort() diff --git a/vault/identity_store_group_aliases_test.go b/vault/identity_store_group_aliases_test.go index 7a0f5e57319a1..ca1ca3c465324 100644 --- a/vault/identity_store_group_aliases_test.go 
+++ b/vault/identity_store_group_aliases_test.go @@ -358,7 +358,7 @@ func TestIdentityStore_GroupAliases_MemDBIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid1", "testparentgroupid2"}, MemberEntityIDs: []string{"testentityid1", "testentityid2"}, Policies: []string{"testpolicy1", "testpolicy2"}, - BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid"), + BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid"), } txn := i.db.Txn(true) diff --git a/vault/identity_store_groups_test.go b/vault/identity_store_groups_test.go index ee97e6093cc07..d3043bf8a29f1 100644 --- a/vault/identity_store_groups_test.go +++ b/vault/identity_store_groups_test.go @@ -436,7 +436,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid1", "testparentgroupid2"}, MemberEntityIDs: []string{"testentityid1", "testentityid2"}, Policies: []string{"testpolicy1", "testpolicy2"}, - BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid"), + BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid"), } // Insert it into memdb @@ -459,7 +459,7 @@ func TestIdentityStore_MemDBGroupIndexes(t *testing.T) { ParentGroupIDs: []string{"testparentgroupid2", "testparentgroupid3"}, MemberEntityIDs: []string{"testentityid2", "testentityid3"}, Policies: []string{"testpolicy2", "testpolicy3"}, - BucketKeyHash: i.groupPacker.BucketHashKeyForItemID("testgroupid2"), + BucketKeyHash: i.groupPacker.BucketKeyHashByItemID("testgroupid2"), } // Insert it into memdb diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index f0bc76e602817..ae8a23f86a1ad 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -410,7 +410,7 @@ func TestIdentityStore_MergeConflictingAliases(t *testing.T) { alias, }, } - entity.BucketKeyHash = c.identityStore.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = c.identityStore.entityPacker.BucketKeyHashByItemID(entity.ID) // Now add the alias to two entities, skipping all existing checking by // writing directly entityAny, err := ptypes.MarshalAny(entity) @@ -430,7 +430,7 @@ func TestIdentityStore_MergeConflictingAliases(t *testing.T) { entity.Policies = []string{"bar", "baz"} alias.ID = "alias2" alias.CanonicalID = "entity2" - entity.BucketKeyHash = c.identityStore.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = c.identityStore.entityPacker.BucketKeyHashByItemID(entity.ID) entityAny, err = ptypes.MarshalAny(entity) if err != nil { t.Fatal(err) diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 8968f8ea1e3a0..5ded754ac9f65 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -1011,7 +1011,7 @@ func (i *IdentityStore) sanitizeEntity(ctx context.Context, entity *identity.Ent } // Set the hash value of the storage bucket key in entity - entity.BucketKeyHash = i.entityPacker.BucketHashKeyForItemID(entity.ID) + entity.BucketKeyHash = i.entityPacker.BucketKeyHashByItemID(entity.ID) } ns, err := namespace.FromContext(ctx) @@ -1071,7 +1071,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident } // Set the hash value of the storage bucket key in group - group.BucketKeyHash = i.groupPacker.BucketHashKeyForItemID(group.ID) + group.BucketKeyHash = i.groupPacker.BucketKeyHashByItemID(group.ID) } if group.NamespaceID == "" { From 7c26b993192302d45244034b43c965f998ec5bd7 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 31 Jan 2019 20:56:43 -0500 
Subject: [PATCH 14/38] Add entity upgrade code, still need group --- helper/storagepacker/storagepacker_v1.go | 24 +++++++++------- vault/identity_store.go | 36 ++++++++++++++++++++++-- 2 files changed, 47 insertions(+), 13 deletions(-) diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index ea3f1a75c1691..b66c0fd9812dc 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -232,12 +232,20 @@ func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { bucket.Lock() defer bucket.Unlock() - return s.storeBucket(bucket, false) + if err := s.storeBucket(bucket); err != nil { + return err + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(GetCacheKey(bucket.Key), bucket) + s.bucketsCacheLock.Unlock() + + return nil } // storeBucket actually stores the bucket. It expects that it's already locked. -func (s *StoragePackerV1) storeBucket(bucket *LockedBucket, flushMode bool) error { - if !flushMode && atomic.LoadUint32(&s.queueMode) == 1 { +func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { + if atomic.LoadUint32(&s.queueMode) == 1 { s.queuedBuckets.Store(bucket.Key, bucket) return nil } @@ -263,10 +271,6 @@ func (s *StoragePackerV1) storeBucket(bucket *LockedBucket, flushMode bool) erro return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) } - s.bucketsCacheLock.Lock() - s.bucketsCache.Insert(GetCacheKey(bucket.Key), bucket) - s.bucketsCacheLock.Unlock() - return nil } @@ -372,7 +376,7 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { } delete(bucket.ItemMap, itemID) - return s.storeBucket(bucket, false) + return s.storeBucket(bucket) } // GetItem fetches the storage entry for a given key from its corresponding @@ -496,7 +500,7 @@ func (s *StoragePackerV1) PutItem(item *Item) error { } // Persist the result - return s.storeBucket(bucket, false) + return s.storeBucket(bucket) } // NewStoragePackerV1 creates a new storage packer for a given view @@ -594,7 +598,7 @@ func (s *StoragePackerV1) SetQueueMode(enabled bool) { func (s *StoragePackerV1) FlushQueue() error { var err *multierror.Error s.queuedBuckets.Range(func(key, value interface{}) bool { - lErr := s.storeBucket(value.(*LockedBucket), true) + lErr := s.storeBucket(value.(*LockedBucket)) if lErr != nil { err = multierror.Append(err, lErr) } diff --git a/vault/identity_store.go b/vault/identity_store.go index 62a995dc781af..f5dbea889c536 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -76,9 +76,6 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo } entityBucketsToUpgrade = append(entityBucketsToUpgrade, val) } - if len(vals) > 0 { - // TODO - } iStore.entityPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ BucketStorageView: logical.NewStorageView(entitiesBucketStorageView, "v2/"), ConfigStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"config/"), @@ -87,6 +84,39 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo if err != nil { return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } + if len(entityBucketsToUpgrade) > 0 { + iStore.entityPacker.SetQueueMode(true) + for _, key := range entityBucketsToUpgrade { + storageEntry, err := entitiesBucketStorageView.Get(ctx, key) + if err != nil { + return nil, err + } + if storageEntry == nil { + // Not clear what to do here really, but if there's really + // nothing there, nothing to 
load, so continue + continue + } + bucket, err := iStore.entityPacker.DecodeBucket(storageEntry) + if err != nil { + return nil, err + } + // Set to the new prefix + bucket.Key = bucket.Key + "v2/" + for _, item := range bucket.Items { + bucket.ItemMap[item.ID] = item.Message + } + iStore.entityPacker.PutBucket(bucket) + } + iStore.entityPacker.SetQueueMode(false) + if err := iStore.entityPacker.FlushQueue(); err != nil { + return nil, err + } + for _, key := range entityBucketsToUpgrade { + if err := entitiesBucketStorageView.Delete(ctx, key); err != nil { + return nil, err + } + } + } groupsBucketStorageView := logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/") vals, err = groupsBucketStorageView.List(ctx, "") From f0b83e30c8be3e82d2fd681aa59ad986adf6ab06 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 1 Feb 2019 09:07:47 -0500 Subject: [PATCH 15/38] Only do upgrade if primary --- vault/identity_store.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/vault/identity_store.go b/vault/identity_store.go index f5dbea889c536..c515ee9ba4bd0 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -108,13 +108,15 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo iStore.entityPacker.PutBucket(bucket) } iStore.entityPacker.SetQueueMode(false) - if err := iStore.entityPacker.FlushQueue(); err != nil { - return nil, err - } - for _, key := range entityBucketsToUpgrade { - if err := entitiesBucketStorageView.Delete(ctx, key); err != nil { + if !core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby) { + if err := iStore.entityPacker.FlushQueue(); err != nil { return nil, err } + for _, key := range entityBucketsToUpgrade { + if err := entitiesBucketStorageView.Delete(ctx, key); err != nil { + return nil, err + } + } } } From 0ca022863650f455b607fc18ce20bfa458b6851e Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 1 Feb 2019 18:34:39 -0500 Subject: [PATCH 16/38] Add upgrade test and fix bugs --- helper/storagepacker/storagepacker_v1.go | 8 +- .../storagepacker/legacy_storagepacker.go | 21 +- .../storagepacker_upgrade_test.go | 201 ++++++++++++++++++ vault/identity_store.go | 68 +----- vault/identity_store_util.go | 71 +++++++ vault/testing_util.go | 8 +- 6 files changed, 295 insertions(+), 82 deletions(-) rename {helper => vault/external_tests}/storagepacker/legacy_storagepacker.go (94%) create mode 100644 vault/external_tests/storagepacker/storagepacker_upgrade_test.go diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index b66c0fd9812dc..2d5aa091c5f55 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -485,11 +485,11 @@ func (s *StoragePackerV1) PutItem(item *Item) error { if err != nil { return errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) } - - s.bucketsCacheLock.Lock() - s.bucketsCache.Insert(cacheKey, bucket) - s.bucketsCacheLock.Unlock() } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(cacheKey, bucket) + s.bucketsCacheLock.Unlock() } bucket.Lock() diff --git a/helper/storagepacker/legacy_storagepacker.go b/vault/external_tests/storagepacker/legacy_storagepacker.go similarity index 94% rename from helper/storagepacker/legacy_storagepacker.go rename to vault/external_tests/storagepacker/legacy_storagepacker.go index 7f337c80dd10d..99b3287a1fe47 100644 --- 
a/helper/storagepacker/legacy_storagepacker.go +++ b/vault/external_tests/storagepacker/legacy_storagepacker.go @@ -13,6 +13,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/compressutil" "github.com/hashicorp/vault/helper/locksutil" + sp2 "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/logical" ) @@ -57,7 +58,7 @@ func (s *LegacyStoragePacker) View() logical.Storage { } // Get returns a bucket for a given key -func (s *LegacyStoragePacker) GetBucket(key string) (*Bucket, error) { +func (s *LegacyStoragePacker) GetBucket(key string) (*sp2.Bucket, error) { if key == "" { return nil, fmt.Errorf("missing bucket key") } @@ -83,7 +84,7 @@ func (s *LegacyStoragePacker) GetBucket(key string) (*Bucket, error) { uncompressedData = storageEntry.Value } - var bucket Bucket + var bucket sp2.Bucket err = proto.Unmarshal(uncompressedData, &bucket) if err != nil { return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) @@ -94,7 +95,7 @@ func (s *LegacyStoragePacker) GetBucket(key string) (*Bucket, error) { // upsert either inserts a new item into the bucket or updates an existing one // if an item with a matching key is already present. -func (s *Bucket) legacyUpsert(item *Item) error { +func legacyUpsert(s *sp2.Bucket, item *sp2.Item) error { if s == nil { return fmt.Errorf("nil storage bucket") } @@ -170,7 +171,7 @@ func (s *LegacyStoragePacker) DeleteItem(itemID string) error { uncompressedData = storageEntry.Value } - var bucket Bucket + var bucket sp2.Bucket err = proto.Unmarshal(uncompressedData, &bucket) if err != nil { return errwrap.Wrapf("failed decoding packed storage entry: {{err}}", err) @@ -201,7 +202,7 @@ func (s *LegacyStoragePacker) DeleteItem(itemID string) error { } // Put stores a packed bucket entry -func (s *LegacyStoragePacker) PutBucket(bucket *Bucket) error { +func (s *LegacyStoragePacker) PutBucket(bucket *sp2.Bucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -240,7 +241,7 @@ func (s *LegacyStoragePacker) PutBucket(bucket *Bucket) error { // GetItem fetches the storage entry for a given key from its corresponding // bucket. -func (s *LegacyStoragePacker) GetItem(itemID string) (*Item, error) { +func (s *LegacyStoragePacker) GetItem(itemID string) (*sp2.Item, error) { if itemID == "" { return nil, fmt.Errorf("empty item ID") } @@ -268,7 +269,7 @@ func (s *LegacyStoragePacker) GetItem(itemID string) (*Item, error) { } // PutItem stores a storage entry in its corresponding bucket -func (s *LegacyStoragePacker) PutItem(item *Item) error { +func (s *LegacyStoragePacker) PutItem(item *sp2.Item) error { if item == nil { return fmt.Errorf("nil item") } @@ -281,7 +282,7 @@ func (s *LegacyStoragePacker) PutItem(item *Item) error { bucketKey := s.BucketKey(item.ID) bucketPath := s.BucketPath(bucketKey) - bucket := &Bucket{ + bucket := &sp2.Bucket{ Key: bucketPath, } @@ -301,7 +302,7 @@ func (s *LegacyStoragePacker) PutItem(item *Item) error { if storageEntry == nil { // If the bucket entry does not exist, this will be the only item the // bucket that is going to be persisted. 
-		bucket.Items = []*Item{
+		bucket.Items = []*sp2.Item{
 			item,
 		}
 	} else {
@@ -318,7 +319,7 @@ func (s *LegacyStoragePacker) PutItem(item *Item) error {
 			return errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err)
 		}
 
-		err = bucket.legacyUpsert(item)
+		err = legacyUpsert(bucket, item)
 		if err != nil {
 			return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err)
 		}
diff --git a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go
new file mode 100644
index 0000000000000..83bc3d21b42b9
--- /dev/null
+++ b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go
@@ -0,0 +1,201 @@
+package storagepacker
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/golang/protobuf/ptypes"
+	log "github.com/hashicorp/go-hclog"
+	uuid "github.com/hashicorp/go-uuid"
+	"github.com/hashicorp/vault/helper/identity"
+	"github.com/hashicorp/vault/helper/logging"
+	"github.com/hashicorp/vault/helper/storagepacker"
+	"github.com/hashicorp/vault/helper/testhelpers"
+	vaulthttp "github.com/hashicorp/vault/http"
+	"github.com/hashicorp/vault/logical"
+	"github.com/hashicorp/vault/shamir"
+	"github.com/hashicorp/vault/vault"
+)
+
+func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) {
+	logger := logging.NewVaultLogger(log.Trace)
+	cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{
+		HandlerFunc: vaulthttp.Handler,
+		NumCores:    1,
+		Logger:      logger,
+	})
+
+	cluster.Start()
+	defer cluster.Cleanup()
+
+	core := cluster.Cores[0]
+	vault.TestWaitActive(t, core.Core)
+	client := core.Client
+	ctx := context.Background()
+
+	// Step 1: write something into Identity so that we create storage paths
+	// and know where to put things
+	_, err := client.Logical().Write("identity/entity", map[string]interface{}{
+		"name": "foobar",
+	})
+
+	// Step 2: seal, so we can modify data without Vault
+	if err := client.Sys().Seal(); err != nil {
+		t.Fatal(err)
+	}
+
+	// Step 3: Unseal the barrier so we can write legit stuff into the data
+	// store
+	barrierKey, err := shamir.Combine(cluster.BarrierKeys[0:3])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	barrier, err := vault.NewAESGCMBarrier(core.UnderlyingStorage)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if err := barrier.Unseal(ctx, barrierKey); err != nil {
+		t.Fatal(err)
+	}
+
+	// Step 4: Remove existing packer data, create a legacy packer, write
+	// stuff, ensure that all buckets are created.
+ bes, err := barrier.List(ctx, "logical/") + if err != nil { + t.Fatal(err) + } + + if len(bes) > 1 { + t.Fatalf("expected only identity logical area, got %v", bes) + } + + entityPackerLogger := logger.Named("storagepacker").Named("entities") + groupPackerLogger := logger.Named("storagepacker").Named("groups") + storage := logical.NewStorageView(barrier, "logical/"+bes[0]) + + numEntries := 10000 + + if err := logical.ClearView(ctx, storage); err != nil { + t.Fatal(err) + } + + entityPacker, err := NewLegacyStoragePacker(storage, entityPackerLogger, "") + if err != nil { + t.Fatal(err) + } + + groupPacker, err := NewLegacyStoragePacker(storage, groupPackerLogger, "packer/group/buckets/") + if err != nil { + t.Fatal(err) + } + + var entity identity.Entity + var group identity.Group + var item storagepacker.Item + for i := 0; i < numEntries; i++ { + entity.ID, _ = uuid.GenerateUUID() + entity.Name = fmt.Sprintf("%d", i) + entityAsAny, err := ptypes.MarshalAny(&entity) + if err != nil { + t.Fatal(err) + } + item.ID = entity.ID + item.Message = entityAsAny + if err := entityPacker.PutItem(&item); err != nil { + t.Fatal(err) + } + + group.ID, _ = uuid.GenerateUUID() + group.Name = fmt.Sprintf("%d", i) + groupAsAny, err := ptypes.MarshalAny(&group) + if err != nil { + t.Fatal(err) + } + item.ID = group.ID + item.Message = groupAsAny + if err := groupPacker.PutItem(&item); err != nil { + t.Fatal(err) + } + } + + buckets, err := barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) + } + + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) + } + + // Step 5: Unseal Vault, make sure we can fetch every one of the created + // identities, and that storage looks as we expect + testhelpers.EnsureCoresUnsealed(t, cluster) + + for i := 0; i < numEntries; i++ { + secret, err := client.Logical().Read(fmt.Sprintf("identity/entity/name/%d", i)) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatal("bad name") + } + + secret, err = client.Logical().Read(fmt.Sprintf("identity/group/name/%d", i)) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatal("bad name") + } + } + + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 1 { + t.Fatalf("%d", len(buckets)) + } + + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/v2/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) + } + + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 1 { + t.Fatalf("%d", len(buckets)) + } + + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/v2/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) + } + + // Step 6: repeat with groups +} diff --git a/vault/identity_store.go b/vault/identity_store.go index c515ee9ba4bd0..602332f0a24b6 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -63,81 +63,17 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo groupsPackerLogger := 
iStore.logger.Named("storagepacker").Named("groups") core.AddLogger(groupsPackerLogger) - // Check for the upgrade case - entitiesBucketStorageView := logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"buckets/") - vals, err := entitiesBucketStorageView.List(ctx, "") - if err != nil { - return nil, err - } - entityBucketsToUpgrade := make([]string, 0, 256) - for _, val := range vals { - if val == "v2/" { - continue - } - entityBucketsToUpgrade = append(entityBucketsToUpgrade, val) - } iStore.entityPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - BucketStorageView: logical.NewStorageView(entitiesBucketStorageView, "v2/"), + BucketStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"buckets/v2/"), ConfigStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"config/"), Logger: entitiesPackerLogger, }) if err != nil { return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } - if len(entityBucketsToUpgrade) > 0 { - iStore.entityPacker.SetQueueMode(true) - for _, key := range entityBucketsToUpgrade { - storageEntry, err := entitiesBucketStorageView.Get(ctx, key) - if err != nil { - return nil, err - } - if storageEntry == nil { - // Not clear what to do here really, but if there's really - // nothing there, nothing to load, so continue - continue - } - bucket, err := iStore.entityPacker.DecodeBucket(storageEntry) - if err != nil { - return nil, err - } - // Set to the new prefix - bucket.Key = bucket.Key + "v2/" - for _, item := range bucket.Items { - bucket.ItemMap[item.ID] = item.Message - } - iStore.entityPacker.PutBucket(bucket) - } - iStore.entityPacker.SetQueueMode(false) - if !core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby) { - if err := iStore.entityPacker.FlushQueue(); err != nil { - return nil, err - } - for _, key := range entityBucketsToUpgrade { - if err := entitiesBucketStorageView.Delete(ctx, key); err != nil { - return nil, err - } - } - } - } - - groupsBucketStorageView := logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/") - vals, err = groupsBucketStorageView.List(ctx, "") - if err != nil { - return nil, err - } - groupBucketsToUpgrade := make([]string, 0, 256) - for _, val := range vals { - if val == "v2/" { - continue - } - groupBucketsToUpgrade = append(groupBucketsToUpgrade, val) - } - if len(vals) > 0 { - // TODO - } iStore.groupPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - BucketStorageView: logical.NewStorageView(groupsBucketStorageView, "v2/"), + BucketStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/v2/"), ConfigStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"config/"), Logger: groupsPackerLogger, }) diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 5ded754ac9f65..2262569eca0a7 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -31,6 +31,77 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { return nil } + // Check for the legacy -> v2 upgrade case + upgradeLegacyStoragePacker := func(prefix string, packer *storagepacker.StoragePackerV1) error { + bucketStorageView := logical.NewStorageView(c.identityStore.view, prefix+"buckets/") + vals, err := bucketStorageView.List(ctx, "") + if err != nil { + return err + } + bucketsToUpgrade := make([]string, 0, 256) + for _, val := range vals { + if val == "v2/" { + continue 
+ } + bucketsToUpgrade = append(bucketsToUpgrade, val) + } + + if len(bucketsToUpgrade) == 0 { + return nil + } + + packer.SetQueueMode(true) + for _, key := range bucketsToUpgrade { + storageEntry, err := bucketStorageView.Get(ctx, key) + if err != nil { + return err + } + if storageEntry == nil { + // Not clear what to do here really, but if there's really + // nothing there, nothing to load, so continue + continue + } + bucket, err := packer.DecodeBucket(storageEntry) + if err != nil { + return err + } + // Set to the new prefix + for _, item := range bucket.Items { + packer.PutItem(item) + } + } + packer.SetQueueMode(false) + // We don't want to try persisting/deleting on a secondary. What will + // happen is: since the buckets will be in the cache in the storage + // packer with the new data, they won't get flushed, but further + // lookups will hit those in-memory updated buckets and return the new + // values, so memdb will actually use the new values. When the new + // buckets get written on the primary, since there is no randomness + // they should also match the buckets that were upgraded in memory + // here; when the old buckets get removed on the primary, it will then + // be essentially a noop here and when memdb tries to expire entries it + // will just not find any. I think. + if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby) { + if err := packer.FlushQueue(); err != nil { + return err + } + for _, key := range bucketsToUpgrade { + if err := bucketStorageView.Delete(ctx, key); err != nil { + return err + } + } + } + + return nil + } + + if err := upgradeLegacyStoragePacker(entityStoragePackerPrefix, c.identityStore.entityPacker); err != nil { + return err + } + if err := upgradeLegacyStoragePacker(groupStoragePackerPrefix, c.identityStore.groupPacker); err != nil { + return err + } + loadFunc := func(context.Context) error { err := c.identityStore.loadEntities(ctx) if err != nil { diff --git a/vault/testing_util.go b/vault/testing_util.go index 689ce11fd6149..26c7cde057cc7 100644 --- a/vault/testing_util.go +++ b/vault/testing_util.go @@ -2,9 +2,13 @@ package vault -import testing "github.com/mitchellh/go-testing-interface" +import ( + testing "github.com/mitchellh/go-testing-interface" +) func testGenerateCoreKeys() (interface{}, interface{}, error) { return nil, nil, nil } func testGetLicensingConfig(interface{}) *LicensingConfig { return &LicensingConfig{} } -func testAdjustTestCore(*CoreConfig, *TestClusterCore) {} func testExtraClusterCoresTestSetup(testing.T, interface{}, []*TestClusterCore) {} +func testAdjustTestCore(_ *CoreConfig, tcc *TestClusterCore) { + tcc.UnderlyingStorage = tcc.physical +} From a139a5712362575f68d1dc37dbb55db80dd0e1cb Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 1 Feb 2019 19:16:38 -0500 Subject: [PATCH 17/38] Add a step to reseal and reload --- .../storagepacker_upgrade_test.go | 91 ++++++++++--------- 1 file changed, 50 insertions(+), 41 deletions(-) diff --git a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go index 83bc3d21b42b9..bb45cc50ee923 100644 --- a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go +++ b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go @@ -139,63 +139,72 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { // Step 5: Unseal Vault, make sure we can fetch every one of the created // identities, and that storage 
looks as we expect - testhelpers.EnsureCoresUnsealed(t, cluster) + step5 := func() { + testhelpers.EnsureCoresUnsealed(t, cluster) + + for i := 0; i < numEntries; i++ { + secret, err := client.Logical().Read(fmt.Sprintf("identity/entity/name/%d", i)) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatal("bad name") + } + + secret, err = client.Logical().Read(fmt.Sprintf("identity/group/name/%d", i)) + if err != nil { + t.Fatal(err) + } + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatal("bad name") + } + } - for i := 0; i < numEntries; i++ { - secret, err := client.Logical().Read(fmt.Sprintf("identity/entity/name/%d", i)) + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") if err != nil { t.Fatal(err) } - if secret == nil { - t.Fatal("nil secret") - } - if secret.Data["name"] != fmt.Sprintf("%d", i) { - t.Fatal("bad name") + if len(buckets) != 1 { + t.Fatalf("%d", len(buckets)) } - secret, err = client.Logical().Read(fmt.Sprintf("identity/group/name/%d", i)) + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/v2/") if err != nil { t.Fatal(err) } - if secret == nil { - t.Fatal("nil secret") + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) } - if secret.Data["name"] != fmt.Sprintf("%d", i) { - t.Fatal("bad name") - } - } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") - if err != nil { - t.Fatal(err) - } - if len(buckets) != 1 { - t.Fatalf("%d", len(buckets)) - } + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 1 { + t.Fatalf("%d", len(buckets)) + } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/v2/") - if err != nil { - t.Fatal(err) - } - if len(buckets) != 256 { - t.Fatalf("%d", len(buckets)) + buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/v2/") + if err != nil { + t.Fatal(err) + } + if len(buckets) != 256 { + t.Fatalf("%d", len(buckets)) + } } + step5() - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") - if err != nil { + // Step 6: seal and unseal to make sure we're not just reading cache; IOW repeat step 5 + if err := client.Sys().Seal(); err != nil { t.Fatal(err) } - if len(buckets) != 1 { - t.Fatalf("%d", len(buckets)) - } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/v2/") - if err != nil { - t.Fatal(err) - } - if len(buckets) != 256 { - t.Fatalf("%d", len(buckets)) - } + step5() - // Step 6: repeat with groups } From fe78cfa59adb4e7a9f0aee8c6e5b50e35489cdbf Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Sun, 3 Feb 2019 09:53:17 -0500 Subject: [PATCH 18/38] Port some changes over --- helper/storagepacker/storagepacker.go | 24 ++++++ helper/storagepacker/storagepacker_v1.go | 58 +++++++------- helper/testhelpers/testhelpers.go | 47 +++++------ vault/core.go | 5 ++ vault/expiration.go | 2 +- .../storagepacker/legacy_storagepacker.go | 77 ++++++++++++------- .../storagepacker_upgrade_test.go | 24 +++++- vault/ha.go | 14 +++- vault/identity_store.go | 28 ++++--- vault/identity_store_aliases.go | 2 +- vault/identity_store_entities.go | 8 +- vault/identity_store_group_aliases.go | 2 +- vault/identity_store_groups.go | 2 +- vault/identity_store_structs.go | 4 +- vault/identity_store_util.go | 36 ++++----- vault/request_handling.go | 2 +- 16 files changed, 213 insertions(+), 122 
deletions(-) create mode 100644 helper/storagepacker/storagepacker.go diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go new file mode 100644 index 0000000000000..5b5c5d2d6b67b --- /dev/null +++ b/helper/storagepacker/storagepacker.go @@ -0,0 +1,24 @@ +package storagepacker + +import ( + "context" + + "github.com/hashicorp/vault/logical" +) + +type StoragePackerFactory func(context.Context, *Config) (StoragePacker, error) + +type StoragePacker interface { + BucketsView() *logical.StorageView + BucketKeyHashByItemID(string) string + GetCacheKey(string) string + GetBucket(context.Context, string) (*LockedBucket, error) + DecodeBucket(*logical.StorageEntry) (*LockedBucket, error) + PutBucket(context.Context, *LockedBucket) error + DeleteBucket(context.Context, string) error + DeleteItem(context.Context, string) error + GetItem(context.Context, string) (*Item, error) + PutItem(context.Context, *Item) error + SetQueueMode(enabled bool) + FlushQueue(context.Context) error +} diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index 2d5aa091c5f55..eaad1cc224d2b 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v1.go @@ -123,16 +123,16 @@ func (s *StoragePackerV1) BucketStorageKeyForItemID(itemID string) string { } func (s *StoragePackerV1) BucketKeyHashByItemID(itemID string) string { - return GetCacheKey(s.BucketStorageKeyForItemID(itemID)) + return s.GetCacheKey(s.BucketStorageKeyForItemID(itemID)) } -func GetCacheKey(key string) string { +func (s *StoragePackerV1) GetCacheKey(key string) string { return strings.Replace(key, "/", "", -1) } // Get returns a bucket for a given key -func (s *StoragePackerV1) GetBucket(key string) (*LockedBucket, error) { - cacheKey := GetCacheKey(key) +func (s *StoragePackerV1) GetBucket(ctx context.Context, key string) (*LockedBucket, error) { + cacheKey := s.GetCacheKey(key) if key == "" { return nil, fmt.Errorf("missing bucket key") @@ -167,7 +167,7 @@ func (s *StoragePackerV1) GetBucket(key string) (*LockedBucket, error) { } // Read from the underlying view - storageEntry, err := s.BucketStorageView.Get(context.Background(), key) + storageEntry, err := s.BucketStorageView.Get(ctx, key) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) } @@ -214,7 +214,7 @@ func (s *StoragePackerV1) DecodeBucket(storageEntry *logical.StorageEntry) (*Loc } // Put stores a packed bucket entry -func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { +func (s *StoragePackerV1) PutBucket(ctx context.Context, bucket *LockedBucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -223,7 +223,7 @@ func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { return fmt.Errorf("missing key") } - cacheKey := GetCacheKey(bucket.Key) + cacheKey := s.GetCacheKey(bucket.Key) lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() @@ -232,19 +232,19 @@ func (s *StoragePackerV1) PutBucket(bucket *LockedBucket) error { bucket.Lock() defer bucket.Unlock() - if err := s.storeBucket(bucket); err != nil { + if err := s.storeBucket(ctx, bucket); err != nil { return err } s.bucketsCacheLock.Lock() - s.bucketsCache.Insert(GetCacheKey(bucket.Key), bucket) + s.bucketsCache.Insert(s.GetCacheKey(bucket.Key), bucket) s.bucketsCacheLock.Unlock() return nil } // storeBucket actually stores the bucket. It expects that it's already locked. 
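The StoragePackerFactory type and StoragePacker interface added earlier in this patch (helper/storagepacker/storagepacker.go) exist so the identity store can be wired to different packer implementations. The following is a hedged sketch, not part of the patch itself, of how a test might exercise the atomic.Value hook (StoragePackerCreationFunc) that a later hunk in this same patch adds to vault/identity_store.go; swapPackerFactory is a hypothetical helper name, and the vault and storagepacker imports are assumed.

// swapPackerFactory is a hypothetical helper for illustration only: install a
// different packer constructor before building the test cluster, and return a
// function that restores the previous one. NewLegacyStoragePacker matches the
// StoragePackerFactory signature, so the conversion in the usage note is valid.
func swapPackerFactory(f storagepacker.StoragePackerFactory) (restore func()) {
	old := vault.StoragePackerCreationFunc.Load().(storagepacker.StoragePackerFactory)
	vault.StoragePackerCreationFunc.Store(f)
	return func() { vault.StoragePackerCreationFunc.Store(old) }
}

// Possible usage inside a test, before vault.NewTestCluster is called:
//	defer swapPackerFactory(storagepacker.StoragePackerFactory(NewLegacyStoragePacker))()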
-func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { +func (s *StoragePackerV1) storeBucket(ctx context.Context, bucket *LockedBucket) error { if atomic.LoadUint32(&s.queueMode) == 1 { s.queuedBuckets.Store(bucket.Key, bucket) return nil @@ -263,7 +263,7 @@ func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { } // Store the compressed value - err = s.BucketStorageView.Put(context.Background(), &logical.StorageEntry{ + err = s.BucketStorageView.Put(ctx, &logical.StorageEntry{ Key: bucket.Key, Value: compressedBucket, }) @@ -275,18 +275,18 @@ func (s *StoragePackerV1) storeBucket(bucket *LockedBucket) error { } // DeleteBucket deletes an entire bucket entry -func (s *StoragePackerV1) DeleteBucket(key string) error { +func (s *StoragePackerV1) DeleteBucket(ctx context.Context, key string) error { if key == "" { return fmt.Errorf("missing key") } - cacheKey := GetCacheKey(key) + cacheKey := s.GetCacheKey(key) lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() defer lock.Unlock() - if err := s.BucketStorageView.Delete(context.Background(), key); err != nil { + if err := s.BucketStorageView.Delete(ctx, key); err != nil { return errwrap.Wrapf("failed to delete packed storage entry: {{err}}", err) } @@ -322,14 +322,14 @@ func (s *LockedBucket) upsert(item *Item) error { // DeleteItem removes the storage entry which the given key refers to from its // corresponding bucket. -func (s *StoragePackerV1) DeleteItem(itemID string) error { +func (s *StoragePackerV1) DeleteItem(ctx context.Context, itemID string) error { if itemID == "" { return fmt.Errorf("empty item ID") } // Get the bucket key bucketKey := s.BucketStorageKeyForItemID(itemID) - cacheKey := GetCacheKey(bucketKey) + cacheKey := s.GetCacheKey(bucketKey) lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() @@ -345,7 +345,7 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { bucket = bucketRaw.(*LockedBucket) } else { // Read from underlying view - storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) + storageEntry, err := s.BucketStorageView.Get(ctx, bucketKey) if err != nil { return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } @@ -376,18 +376,18 @@ func (s *StoragePackerV1) DeleteItem(itemID string) error { } delete(bucket.ItemMap, itemID) - return s.storeBucket(bucket) + return s.storeBucket(ctx, bucket) } // GetItem fetches the storage entry for a given key from its corresponding // bucket. 
-func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { +func (s *StoragePackerV1) GetItem(ctx context.Context, itemID string) (*Item, error) { if itemID == "" { return nil, fmt.Errorf("empty item ID") } bucketKey := s.BucketStorageKeyForItemID(itemID) - cacheKey := GetCacheKey(bucketKey) + cacheKey := s.GetCacheKey(bucketKey) lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.RLock() @@ -403,7 +403,7 @@ func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { bucket = bucketRaw.(*LockedBucket) } else { // Read from underlying view - storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) + storageEntry, err := s.BucketStorageView.Get(ctx, bucketKey) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } @@ -442,7 +442,7 @@ func (s *StoragePackerV1) GetItem(itemID string) (*Item, error) { } // PutItem stores a storage entry in its corresponding bucket -func (s *StoragePackerV1) PutItem(item *Item) error { +func (s *StoragePackerV1) PutItem(ctx context.Context, item *Item) error { if item == nil { return fmt.Errorf("nil item") } @@ -453,7 +453,7 @@ func (s *StoragePackerV1) PutItem(item *Item) error { // Get the bucket key bucketKey := s.BucketStorageKeyForItemID(item.ID) - cacheKey := GetCacheKey(bucketKey) + cacheKey := s.GetCacheKey(bucketKey) lock := locksutil.LockForKey(s.storageLocks, cacheKey) lock.Lock() @@ -469,7 +469,7 @@ func (s *StoragePackerV1) PutItem(item *Item) error { bucket = bucketRaw.(*LockedBucket) } else { // Read from underlying view - storageEntry, err := s.BucketStorageView.Get(context.Background(), bucketKey) + storageEntry, err := s.BucketStorageView.Get(ctx, bucketKey) if err != nil { return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } @@ -500,15 +500,17 @@ func (s *StoragePackerV1) PutItem(item *Item) error { } // Persist the result - return s.storeBucket(bucket) + return s.storeBucket(ctx, bucket) } // NewStoragePackerV1 creates a new storage packer for a given view -func NewStoragePackerV1(ctx context.Context, config *Config) (*StoragePackerV1, error) { +func NewStoragePackerV1(ctx context.Context, config *Config) (StoragePacker, error) { if config.BucketStorageView == nil { return nil, fmt.Errorf("nil buckets view") } + config.BucketStorageView = config.BucketStorageView.SubView("v2/") + if config.ConfigStorageView == nil { return nil, fmt.Errorf("nil config view") } @@ -595,10 +597,10 @@ func (s *StoragePackerV1) SetQueueMode(enabled bool) { } } -func (s *StoragePackerV1) FlushQueue() error { +func (s *StoragePackerV1) FlushQueue(ctx context.Context) error { var err *multierror.Error s.queuedBuckets.Range(func(key, value interface{}) bool { - lErr := s.storeBucket(value.(*LockedBucket)) + lErr := s.storeBucket(ctx, value.(*LockedBucket)) if lErr != nil { err = multierror.Append(err, lErr) } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index ae73c2e4a36e3..a02bc0bf2e30d 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -103,32 +103,35 @@ func RandomWithPrefix(name string) string { func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) { t.Helper() for _, core := range c.Cores { - if !core.Sealed() { - continue - } + EnsureCoreUnsealed(t, c, core) + } +} +func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClusterCore) { + if !core.Sealed() { + return + } - client := core.Client - client.Sys().ResetUnsealProcess() - for j := 0; j < 
len(c.BarrierKeys); j++ { - statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) - if err != nil { - // Sometimes when we get here it's already unsealed on its own - // and then this fails for DR secondaries so check again - if core.Sealed() { - t.Fatal(err) - } - break - } - if statusResp == nil { - t.Fatal("nil status response during unseal") - } - if !statusResp.Sealed { - break + client := core.Client + client.Sys().ResetUnsealProcess() + for j := 0; j < len(c.BarrierKeys); j++ { + statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) + if err != nil { + // Sometimes when we get here it's already unsealed on its own + // and then this fails for DR secondaries so check again + if core.Sealed() { + t.Fatal(err) } + break } - if core.Sealed() { - t.Fatal("core is still sealed") + if statusResp == nil { + t.Fatal("nil status response during unseal") } + if !statusResp.Sealed { + break + } + } + if core.Sealed() { + t.Fatal("core is still sealed") } } diff --git a/vault/core.go b/vault/core.go index 7dc7f95c36b49..7c4dcdf5eb434 100644 --- a/vault/core.go +++ b/vault/core.go @@ -416,6 +416,10 @@ type Core struct { // Stores loggers so we can reset the level allLoggers []log.Logger allLoggersLock sync.RWMutex + + // Can be toggled atomically to cause the core to never try to become + // active, or give up active as soon as it gets it + neverBecomeActive *uint32 } // CoreConfig is used to parameterize a core @@ -590,6 +594,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { activeContextCancelFunc: new(atomic.Value), allLoggers: conf.AllLoggers, builtinRegistry: conf.BuiltinRegistry, + neverBecomeActive: new(uint32), } atomic.StoreUint32(c.sealed, 1) diff --git a/vault/expiration.go b/vault/expiration.go index 924fe6c969b44..4b678fa5b01c0 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -1001,7 +1001,7 @@ func (m *ExpirationManager) RenewToken(ctx context.Context, req *logical.Request if resp.Auth.EntityID != "" && resp.Auth.GroupAliases != nil && m.core.identityStore != nil { - validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(resp.Auth.EntityID, resp.Auth.GroupAliases) + validAliases, err := m.core.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, resp.Auth.EntityID, resp.Auth.GroupAliases) if err != nil { return nil, err } diff --git a/vault/external_tests/storagepacker/legacy_storagepacker.go b/vault/external_tests/storagepacker/legacy_storagepacker.go index 99b3287a1fe47..778e88f9d54d6 100644 --- a/vault/external_tests/storagepacker/legacy_storagepacker.go +++ b/vault/external_tests/storagepacker/legacy_storagepacker.go @@ -13,6 +13,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/compressutil" "github.com/hashicorp/vault/helper/locksutil" + "github.com/hashicorp/vault/helper/storagepacker" sp2 "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/logical" ) @@ -27,7 +28,7 @@ const ( // that gets inserted into the packer should implement StorageBucketItem // interface. type LegacyStoragePacker struct { - view logical.Storage + view *logical.StorageView logger log.Logger storageLocks []*locksutil.LockEntry viewPrefix string @@ -42,7 +43,11 @@ func (s *LegacyStoragePacker) BucketPath(bucketKey string) string { // the item will be stored. The choice of MD5 is only for hash performance // reasons since its value is not used for any security sensitive operation. 
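For orientation, here is a small self-contained sketch, not taken from the patch, of the fixed-bucket layout the comment above refers to: assuming the legacy packer takes the first byte of an MD5 hash of the item ID as the bucket index, every item lands in one of exactly 256 buckets under the view prefix, which is why the upgrade test expects 256 legacy buckets before the upgrade runs. The function and path below are illustrative, not the packer's actual API.

package main

import (
	"crypto/md5"
	"fmt"
	"strconv"
)

// legacyBucketPath illustrates the assumed legacy scheme: MD5 is used purely
// as a fast, well-distributed hash (not for security), and its first byte
// selects one of 256 fixed buckets under the packer's view prefix.
func legacyBucketPath(viewPrefix, itemID string) string {
	sum := md5.Sum([]byte(itemID))
	return viewPrefix + strconv.Itoa(int(sum[0])) // e.g. "packer/buckets/42"
}

func main() {
	fmt.Println(legacyBucketPath("packer/buckets/", "entity-0123"))
}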
func (s *LegacyStoragePacker) BucketKeyHashByItemID(itemID string) string { - return s.BucketKeyHashByKey(s.BucketPath(s.BucketKey(itemID))) + return s.BucketPath(s.BucketKey(itemID)) +} + +func (s *LegacyStoragePacker) GetCacheKey(key string) string { + return key } // BucketKeyHashByKey returns the MD5 hash of the bucket storage key @@ -53,12 +58,12 @@ func (s *LegacyStoragePacker) BucketKeyHashByKey(bucketKey string) string { } // View returns the storage view configured to be used by the packer -func (s *LegacyStoragePacker) View() logical.Storage { +func (s *LegacyStoragePacker) BucketsView() *logical.StorageView { return s.view } // Get returns a bucket for a given key -func (s *LegacyStoragePacker) GetBucket(key string) (*sp2.Bucket, error) { +func (s *LegacyStoragePacker) GetBucket(ctx context.Context, key string) (*sp2.LockedBucket, error) { if key == "" { return nil, fmt.Errorf("missing bucket key") } @@ -68,10 +73,20 @@ func (s *LegacyStoragePacker) GetBucket(key string) (*sp2.Bucket, error) { defer lock.RUnlock() // Read from the underlying view - storageEntry, err := s.view.Get(context.Background(), key) + storageEntry, err := s.view.Get(ctx, key) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) } + + lb, err := s.DecodeBucket(storageEntry) + if err != nil { + return nil, err + } + + return lb, nil +} + +func (s *LegacyStoragePacker) DecodeBucket(storageEntry *logical.StorageEntry) (*sp2.LockedBucket, error) { if storageEntry == nil { return nil, nil } @@ -90,7 +105,19 @@ func (s *LegacyStoragePacker) GetBucket(key string) (*sp2.Bucket, error) { return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) } - return &bucket, nil + return &sp2.LockedBucket{Bucket: &bucket}, nil +} + +func (s *LegacyStoragePacker) DeleteBucket(ctx context.Context, key string) error { + if key == "" { + return fmt.Errorf("missing bucket key") + } + + lock := locksutil.LockForKey(s.storageLocks, key) + lock.Lock() + defer lock.Unlock() + + return s.view.Delete(ctx, key) } // upsert either inserts a new item into the bucket or updates an existing one @@ -142,7 +169,7 @@ func (s *LegacyStoragePacker) BucketKey(itemID string) string { // DeleteItem removes the storage entry which the given key refers to from its // corresponding bucket. -func (s *LegacyStoragePacker) DeleteItem(itemID string) error { +func (s *LegacyStoragePacker) DeleteItem(ctx context.Context, itemID string) error { if itemID == "" { return fmt.Errorf("empty item ID") @@ -155,7 +182,7 @@ func (s *LegacyStoragePacker) DeleteItem(itemID string) error { bucketPath := s.BucketPath(bucketKey) // Read from underlying view - storageEntry, err := s.view.Get(context.Background(), bucketPath) + storageEntry, err := s.view.Get(ctx, bucketPath) if err != nil { return errwrap.Wrapf("failed to read packed storage value: {{err}}", err) } @@ -192,7 +219,7 @@ func (s *LegacyStoragePacker) DeleteItem(itemID string) error { bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) 
// Persist bucket entry only if there is an update - err = s.PutBucket(&bucket) + err = s.PutBucket(ctx, &sp2.LockedBucket{Bucket: &bucket}) if err != nil { return err } @@ -202,7 +229,7 @@ func (s *LegacyStoragePacker) DeleteItem(itemID string) error { } // Put stores a packed bucket entry -func (s *LegacyStoragePacker) PutBucket(bucket *sp2.Bucket) error { +func (s *LegacyStoragePacker) PutBucket(ctx context.Context, bucket *sp2.LockedBucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -228,7 +255,7 @@ func (s *LegacyStoragePacker) PutBucket(bucket *sp2.Bucket) error { } // Store the compressed value - err = s.view.Put(context.Background(), &logical.StorageEntry{ + err = s.view.Put(ctx, &logical.StorageEntry{ Key: bucket.Key, Value: compressedBucket, }) @@ -241,7 +268,7 @@ func (s *LegacyStoragePacker) PutBucket(bucket *sp2.Bucket) error { // GetItem fetches the storage entry for a given key from its corresponding // bucket. -func (s *LegacyStoragePacker) GetItem(itemID string) (*sp2.Item, error) { +func (s *LegacyStoragePacker) GetItem(ctx context.Context, itemID string) (*sp2.Item, error) { if itemID == "" { return nil, fmt.Errorf("empty item ID") } @@ -250,7 +277,7 @@ func (s *LegacyStoragePacker) GetItem(itemID string) (*sp2.Item, error) { bucketPath := s.BucketPath(bucketKey) // Fetch the bucket entry - bucket, err := s.GetBucket(bucketPath) + bucket, err := s.GetBucket(ctx, bucketPath) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) } @@ -269,7 +296,7 @@ func (s *LegacyStoragePacker) GetItem(itemID string) (*sp2.Item, error) { } // PutItem stores a storage entry in its corresponding bucket -func (s *LegacyStoragePacker) PutItem(item *sp2.Item) error { +func (s *LegacyStoragePacker) PutItem(ctx context.Context, item *sp2.Item) error { if item == nil { return fmt.Errorf("nil item") } @@ -326,30 +353,24 @@ func (s *LegacyStoragePacker) PutItem(item *sp2.Item) error { } // Persist the result - return s.PutBucket(bucket) + return s.PutBucket(ctx, &sp2.LockedBucket{Bucket: bucket}) } // NewLegacyStoragePacker creates a new storage packer for a given view -func NewLegacyStoragePacker(view logical.Storage, logger log.Logger, viewPrefix string) (*LegacyStoragePacker, error) { - if view == nil { +func NewLegacyStoragePacker(ctx context.Context, config *storagepacker.Config) (storagepacker.StoragePacker, error) { + if config.BucketStorageView == nil { return nil, fmt.Errorf("nil view") } - if viewPrefix == "" { - viewPrefix = LegacyStoragePackerBucketsPrefix - } - - if !strings.HasSuffix(viewPrefix, "/") { - viewPrefix = viewPrefix + "/" - } - // Create a new packer object for the given view packer := &LegacyStoragePacker{ - view: view, - viewPrefix: viewPrefix, - logger: logger, + view: config.BucketStorageView, + logger: config.Logger, storageLocks: locksutil.CreateLocks(), } return packer, nil } + +func (s *LegacyStoragePacker) SetQueueMode(bool) {} +func (s *LegacyStoragePacker) FlushQueue(context.Context) error { return nil } diff --git a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go index bb45cc50ee923..77cd7ef7f12f7 100644 --- a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go +++ b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go @@ -51,11 +51,21 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { if err != nil { t.Fatal(err) } + if barrierKey == nil { + t.Fatal("nil barrier 
key") + } + + if core.UnderlyingStorage == nil { + t.Fatal("underlying storage is nil") + } barrier, err := vault.NewAESGCMBarrier(core.UnderlyingStorage) if err != nil { t.Fatal(err) } + if barrier == nil { + t.Fatal("nil barrier") + } if err := barrier.Unseal(ctx, barrierKey); err != nil { t.Fatal(err) @@ -82,12 +92,18 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { t.Fatal(err) } - entityPacker, err := NewLegacyStoragePacker(storage, entityPackerLogger, "") + entityPacker, err := NewLegacyStoragePacker(ctx, &storagepacker.Config{ + BucketStorageView: storage.SubView("packer/buckets/"), + Logger: entityPackerLogger, + }) if err != nil { t.Fatal(err) } - groupPacker, err := NewLegacyStoragePacker(storage, groupPackerLogger, "packer/group/buckets/") + groupPacker, err := NewLegacyStoragePacker(ctx, &storagepacker.Config{ + BucketStorageView: storage.SubView("packer/group/buckets/"), + Logger: groupPackerLogger, + }) if err != nil { t.Fatal(err) } @@ -104,7 +120,7 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { } item.ID = entity.ID item.Message = entityAsAny - if err := entityPacker.PutItem(&item); err != nil { + if err := entityPacker.PutItem(ctx, &item); err != nil { t.Fatal(err) } @@ -116,7 +132,7 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { } item.ID = group.ID item.Message = groupAsAny - if err := groupPacker.PutItem(&item); err != nil { + if err := groupPacker.PutItem(ctx, &item); err != nil { t.Fatal(err) } } diff --git a/vault/ha.go b/vault/ha.go index b1affc56f7d16..0ce579f624fd8 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -400,7 +400,7 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop leaderLostCh := c.acquireLock(lock, stopCh) // Bail if we are being shutdown - if leaderLostCh == nil { + if leaderLostCh == nil || atomic.LoadUint32(c.neverBecomeActive) == 1 { return } c.logger.Info("acquired lock, enabling active operation") @@ -770,6 +770,10 @@ func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan st return leaderLostCh } + if atomic.LoadUint32(c.neverBecomeActive) == 1 { + return nil + } + // Retry the acquisition c.logger.Error("failed to acquire lock", "error", err) select { @@ -869,3 +873,11 @@ func (c *Core) clearLeader(uuid string) error { return err } + +func (c *Core) SetNeverBecomeActive(on bool) { + if on { + atomic.StoreUint32(c.neverBecomeActive, 1) + } else { + atomic.StoreUint32(c.neverBecomeActive, 0) + } +} diff --git a/vault/identity_store.go b/vault/identity_store.go index 602332f0a24b6..7ec5669e4e16d 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "strings" + "sync/atomic" "github.com/golang/protobuf/ptypes" "github.com/hashicorp/errwrap" @@ -27,8 +28,13 @@ var ( sendGroupUpgrade = func(*IdentityStore, *identity.Group) (bool, error) { return false, nil } parseExtraEntityFromBucket = func(context.Context, *IdentityStore, *identity.Entity) (bool, error) { return false, nil } addExtraEntityDataToResponse = func(*identity.Entity, map[string]interface{}) {} + StoragePackerCreationFunc = new(atomic.Value) ) +func init() { + StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV1)) +} + func (c *Core) IdentityStore() *IdentityStore { return c.identityStore } @@ -63,8 +69,10 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo groupsPackerLogger := 
iStore.logger.Named("storagepacker").Named("groups") core.AddLogger(groupsPackerLogger) - iStore.entityPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - BucketStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"buckets/v2/"), + creationFunc := StoragePackerCreationFunc.Load().(storagepacker.StoragePackerFactory) + + iStore.entityPacker, err = creationFunc(ctx, &storagepacker.Config{ + BucketStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"buckets/"), ConfigStorageView: logical.NewStorageView(iStore.view, entityStoragePackerPrefix+"config/"), Logger: entitiesPackerLogger, }) @@ -72,8 +80,8 @@ func NewIdentityStore(ctx context.Context, core *Core, config *logical.BackendCo return nil, errwrap.Wrapf("failed to create entity packer: {{err}}", err) } - iStore.groupPacker, err = storagepacker.NewStoragePackerV1(ctx, &storagepacker.Config{ - BucketStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/v2/"), + iStore.groupPacker, err = creationFunc(ctx, &storagepacker.Config{ + BucketStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"buckets/"), ConfigStorageView: logical.NewStorageView(iStore.view, groupStoragePackerPrefix+"config/"), Logger: groupsPackerLogger, }) @@ -119,7 +127,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { switch { // Check if the key is a storage entry key for an entity bucket case strings.HasPrefix(key, i.entityPacker.BucketsView().Prefix()): - bucketKeyHash := storagepacker.GetCacheKey(strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) + bucketKeyHash := i.entityPacker.GetCacheKey(strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) // Create a MemDB transaction txn := i.db.Txn(true) @@ -153,7 +161,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.entityPacker.GetBucket(key) + bucket, err := i.entityPacker.GetBucket(ctx, key) if err != nil { i.logger.Error("failed to refresh entities", "key", key, "error", err) return @@ -188,7 +196,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { // Check if the key is a storage entry key for an group bucket case strings.HasPrefix(key, i.groupPacker.BucketsView().Prefix()): - bucketKeyHash := storagepacker.GetCacheKey(strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) + bucketKeyHash := i.groupPacker.GetCacheKey(strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) // Create a MemDB transaction txn := i.db.Txn(true) @@ -210,7 +218,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.groupPacker.GetBucket(key) + bucket, err := i.groupPacker.GetBucket(ctx, key) if err != nil { i.logger.Error("failed to refresh group", "key", key, "error", err) return @@ -243,7 +251,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Only update MemDB and don't touch the storage - err = i.UpsertGroupInTxn(txn, group, false) + err = i.UpsertGroupInTxn(ctx, txn, group, false) if err != nil { i.logger.Error("failed to update group in MemDB", "error", err) return @@ -329,7 +337,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto } // Store the entity with new format - err = i.entityPacker.PutItem(item) + err = i.entityPacker.PutItem(ctx, item) if err != nil { return nil, err } diff --git a/vault/identity_store_aliases.go 
b/vault/identity_store_aliases.go index 88259240f506e..7084ec5db76e7 100644 --- a/vault/identity_store_aliases.go +++ b/vault/identity_store_aliases.go @@ -406,7 +406,7 @@ func (i *IdentityStore) pathAliasIDDelete() framework.OperationFunc { Message: entityAsAny, } - err = i.entityPacker.PutItem(item) + err = i.entityPacker.PutItem(ctx, item) if err != nil { return nil, err } diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index 4cfadb6873a11..2c2dd6218dbd1 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -489,7 +489,7 @@ func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb for _, group := range groups { group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entity.ID) - err = i.UpsertGroupInTxn(txn, group, true) + err = i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return err } @@ -508,7 +508,7 @@ func (i *IdentityStore) handleEntityDeleteCommon(ctx context.Context, txn *memdb } // Delete the entity from storage - err = i.entityPacker.DeleteItem(entity.ID) + err = i.entityPacker.DeleteItem(ctx, entity.ID) if err != nil { return err } @@ -705,7 +705,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit } // Delete the entity which we are merging from in storage - err = i.entityPacker.DeleteItem(fromEntity.ID) + err = i.entityPacker.DeleteItem(ctx, fromEntity.ID) if err != nil { return nil, err } @@ -727,7 +727,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit Message: toEntityAsAny, } - err = i.entityPacker.PutItem(item) + err = i.entityPacker.PutItem(ctx, item) if err != nil { return nil, err } diff --git a/vault/identity_store_group_aliases.go b/vault/identity_store_group_aliases.go index 4a57b0aadd72c..e4ab685223553 100644 --- a/vault/identity_store_group_aliases.go +++ b/vault/identity_store_group_aliases.go @@ -294,7 +294,7 @@ func (i *IdentityStore) pathGroupAliasIDDelete() framework.OperationFunc { // Delete the alias group.Alias = nil - err = i.UpsertGroupInTxn(txn, group, true) + err = i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return nil, err } diff --git a/vault/identity_store_groups.go b/vault/identity_store_groups.go index d8c3280bcdb3c..123325a14c65b 100644 --- a/vault/identity_store_groups.go +++ b/vault/identity_store_groups.go @@ -443,7 +443,7 @@ func (i *IdentityStore) handleGroupDeleteCommon(ctx context.Context, key string, } // Delete the group from storage - err = i.groupPacker.DeleteItem(group.ID) + err = i.groupPacker.DeleteItem(ctx, group.ID) if err != nil { return nil, err } diff --git a/vault/identity_store_structs.go b/vault/identity_store_structs.go index 605684c2174d1..1e7e54912b6bf 100644 --- a/vault/identity_store_structs.go +++ b/vault/identity_store_structs.go @@ -62,11 +62,11 @@ type IdentityStore struct { // entityPacker is used to pack multiple entity storage entries into 256 // buckets - entityPacker *storagepacker.StoragePackerV1 + entityPacker storagepacker.StoragePacker // groupPacker is used to pack multiple group storage entries into 256 // buckets - groupPacker *storagepacker.StoragePackerV1 + groupPacker storagepacker.StoragePacker // core is the pointer to Vault's core core *Core diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 2262569eca0a7..091d5b77ad3af 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -32,7 +32,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) 
error { } // Check for the legacy -> v2 upgrade case - upgradeLegacyStoragePacker := func(prefix string, packer *storagepacker.StoragePackerV1) error { + upgradeLegacyStoragePacker := func(prefix string, packer storagepacker.StoragePacker) error { bucketStorageView := logical.NewStorageView(c.identityStore.view, prefix+"buckets/") vals, err := bucketStorageView.List(ctx, "") if err != nil { @@ -67,7 +67,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { } // Set to the new prefix for _, item := range bucket.Items { - packer.PutItem(item) + packer.PutItem(ctx, item) } } packer.SetQueueMode(false) @@ -82,7 +82,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { // be essentially a noop here and when memdb tries to expire entries it // will just not find any. I think. if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby) { - if err := packer.FlushQueue(); err != nil { + if err := packer.FlushQueue(ctx); err != nil { return err } for _, key := range bucketsToUpgrade { @@ -182,7 +182,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { return } - bucket, err := i.groupPacker.GetBucket(bucketKey) + bucket, err := i.groupPacker.GetBucket(ctx, bucketKey) if err != nil { errs <- err continue @@ -279,7 +279,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { } } - err = i.UpsertGroupInTxn(txn, group, persist) + err = i.UpsertGroupInTxn(ctx, txn, group, persist) if err != nil { txn.Abort() return errwrap.Wrapf("failed to update group in memdb: {{err}}", err) @@ -337,7 +337,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { return } - bucket, err := i.entityPacker.GetBucket(bucketKey) + bucket, err := i.entityPacker.GetBucket(ctx, bucketKey) if err != nil { errs <- err continue @@ -530,7 +530,7 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e if err != nil { return err } - err = i.entityPacker.PutItem(&storagepacker.Item{ + err = i.entityPacker.PutItem(ctx, &storagepacker.Item{ ID: previousEntity.ID, Message: marshaledPreviousEntity, }) @@ -556,7 +556,7 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e } // Persist the entity object - err = i.entityPacker.PutItem(item) + err = i.entityPacker.PutItem(ctx, item) if err != nil { return err } @@ -1229,7 +1229,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident // Remove group ID from the parent group IDs currentMemberGroup.ParentGroupIDs = strutil.StrListDelete(currentMemberGroup.ParentGroupIDs, group.ID) - err = i.UpsertGroupInTxn(txn, currentMemberGroup, true) + err = i.UpsertGroupInTxn(ctx, txn, currentMemberGroup, true) if err != nil { return err } @@ -1285,7 +1285,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident // This technically is not upsert. It is only update, only the method // name is upsert here. - err = i.UpsertGroupInTxn(txn, memberGroup, true) + err = i.UpsertGroupInTxn(ctx, txn, memberGroup, true) if err != nil { // Ideally we would want to revert the whole operation in case of // errors while persisting in member groups. 
But there is no @@ -1304,7 +1304,7 @@ func (i *IdentityStore) sanitizeAndUpsertGroup(ctx context.Context, group *ident } } - err = i.UpsertGroupInTxn(txn, group, true) + err = i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return err } @@ -1431,11 +1431,11 @@ func (i *IdentityStore) MemDBGroupByName(ctx context.Context, groupName string, return i.MemDBGroupByNameInTxn(ctx, txn, groupName, clone) } -func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error { +func (i *IdentityStore) UpsertGroup(ctx context.Context, group *identity.Group, persist bool) error { txn := i.db.Txn(true) defer txn.Abort() - err := i.UpsertGroupInTxn(txn, group, true) + err := i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return err } @@ -1445,7 +1445,7 @@ func (i *IdentityStore) UpsertGroup(group *identity.Group, persist bool) error { return nil } -func (i *IdentityStore) UpsertGroupInTxn(txn *memdb.Txn, group *identity.Group, persist bool) error { +func (i *IdentityStore) UpsertGroupInTxn(ctx context.Context, txn *memdb.Txn, group *identity.Group, persist bool) error { var err error if txn == nil { @@ -1501,7 +1501,7 @@ func (i *IdentityStore) UpsertGroupInTxn(txn *memdb.Txn, group *identity.Group, return err } if !sent { - if err := i.groupPacker.PutItem(item); err != nil { + if err := i.groupPacker.PutItem(ctx, item); err != nil { return err } } @@ -1945,7 +1945,7 @@ func (i *IdentityStore) MemDBGroupByAliasID(aliasID string, clone bool) (*identi return i.MemDBGroupByAliasIDInTxn(txn, aliasID, clone) } -func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) { +func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(ctx context.Context, entityID string, groupAliases []*logical.Alias) ([]*logical.Alias, error) { i.logger.Debug("refreshing external group memberships", "entity_id", entityID, "group_aliases", groupAliases) if entityID == "" { return nil, fmt.Errorf("empty entity ID") @@ -2001,7 +2001,7 @@ func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID strin group.MemberEntityIDs = append(group.MemberEntityIDs, entityID) - err = i.UpsertGroupInTxn(txn, group, true) + err = i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return nil, err } @@ -2023,7 +2023,7 @@ func (i *IdentityStore) refreshExternalGroupMembershipsByEntityID(entityID strin group.MemberEntityIDs = strutil.StrListDelete(group.MemberEntityIDs, entityID) - err = i.UpsertGroupInTxn(txn, group, true) + err = i.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { return nil, err } diff --git a/vault/request_handling.go b/vault/request_handling.go index a46921a0825e4..a4e049cf799c1 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -1019,7 +1019,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re auth.EntityID = entity.ID if auth.GroupAliases != nil { - validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(auth.EntityID, auth.GroupAliases) + validAliases, err := c.identityStore.refreshExternalGroupMembershipsByEntityID(ctx, auth.EntityID, auth.GroupAliases) if err != nil { return nil, nil, err } From 1133067f6acadc95e04bc022abd6843ed7c21656 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Sun, 3 Feb 2019 10:25:51 -0500 Subject: [PATCH 19/38] Fix location of never become active --- helper/testhelpers/testhelpers.go | 17 +++++++++++++++++ vault/ha.go | 15 ++++++++++----- vault/testing.go | 2 +- 3 files 
changed, 28 insertions(+), 6 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index a02bc0bf2e30d..aff482ec01484 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -135,6 +135,23 @@ func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClust } } +func EnsureCoreIsPerfStandby(t testing.T, core *vault.TestClusterCore) { + start := time.Now() + for { + health, err := core.Client.Sys().Health() + if err != nil { + t.Fatal(err) + } + if health.PerformanceStandby { + break + } + time.Sleep(time.Millisecond * 500) + if time.Now().After(start.Add(time.Second * 30)) { + t.Fatal("did not become a perf standby") + } + } +} + func WaitForReplicationState(t testing.T, c *vault.Core, state consts.ReplicationState) { timeout := time.Now().Add(10 * time.Second) for { diff --git a/vault/ha.go b/vault/ha.go index 0ce579f624fd8..e9813b9086dbc 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -400,9 +400,17 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop leaderLostCh := c.acquireLock(lock, stopCh) // Bail if we are being shutdown - if leaderLostCh == nil || atomic.LoadUint32(c.neverBecomeActive) == 1 { + if leaderLostCh == nil { return } + + if atomic.LoadUint32(c.neverBecomeActive) == 1 { + c.heldHALock = nil + lock.Unlock() + c.logger.Info("marked never become active, giving up after interrupting perf standbys") + continue + } + c.logger.Info("acquired lock, enabling active operation") // This is used later to log a metrics event; this can be helpful to @@ -410,6 +418,7 @@ func (c *Core) waitForLeadership(newLeaderCh chan func(), manualStepDownCh, stop activeTime := time.Now() continueCh := interruptPerfStandby(newLeaderCh, stopCh) + // Grab the statelock or stop if stopped := grabLockOrStop(c.stateLock.Lock, c.stateLock.Unlock, stopCh); stopped { lock.Unlock() @@ -770,10 +779,6 @@ func (c *Core) acquireLock(lock physical.Lock, stopCh <-chan struct{}) <-chan st return leaderLostCh } - if atomic.LoadUint32(c.neverBecomeActive) == 1 { - return nil - } - // Retry the acquisition c.logger.Error("failed to acquire lock", "error", err) select { diff --git a/vault/testing.go b/vault/testing.go index a5faceb8588cd..13021068a9dd3 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -688,7 +688,7 @@ func TestWaitActiveWithError(core *Core) error { start := time.Now() var standby bool var err error - for time.Now().Sub(start) < time.Second { + for time.Now().Sub(start) < 30*time.Second { standby, err = core.Standby() if err != nil { return err From 31f2987b3f25608d347cc3da0c1473d7590bffdd Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Sun, 3 Feb 2019 15:32:16 -0500 Subject: [PATCH 20/38] Change initial test to use constructor swapping --- logical/logical_storage.go | 38 +---- .../storagepacker_upgrade_test.go | 153 +++++++----------- vault/identity_store_entities.go | 3 +- 3 files changed, 62 insertions(+), 132 deletions(-) diff --git a/logical/logical_storage.go b/logical/logical_storage.go index eedc9e42fe6ed..cece9e3b2198b 100644 --- a/logical/logical_storage.go +++ b/logical/logical_storage.go @@ -2,25 +2,12 @@ package logical import ( "context" - "fmt" - - log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/physical" - "github.com/hashicorp/vault/physical/file" - "github.com/hashicorp/vault/physical/inmem" -) - -type LogicalType string - -const ( - LogicalTypeInmem LogicalType = "inmem" - LogicalTypeFile LogicalType = "file" ) type LogicalStorage struct { - 
logicalType LogicalType - underlying physical.Backend + underlying physical.Backend } func (s *LogicalStorage) Get(ctx context.Context, key string) (*StorageEntry, error) { @@ -58,25 +45,8 @@ func (s *LogicalStorage) Underlying() physical.Backend { return s.underlying } -func NewLogicalStorage(logicalType LogicalType, config map[string]string, logger log.Logger) (*LogicalStorage, error) { - s := &LogicalStorage{ - logicalType: logicalType, - } - var err error - switch logicalType { - case LogicalTypeInmem: - s.underlying, err = inmem.NewInmem(nil, nil) - if err != nil { - return nil, err - } - case LogicalTypeFile: - s.underlying, err = file.NewFileBackend(config, logger) - if err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("unsupported logical type %q", logicalType) +func NewLogicalStorage(underlying physical.Backend) *LogicalStorage { + return &LogicalStorage{ + underlying: underlying, } - - return s, nil } diff --git a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go index 77cd7ef7f12f7..dd057cfc09139 100644 --- a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go +++ b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go @@ -5,17 +5,14 @@ import ( "fmt" "testing" - "github.com/golang/protobuf/ptypes" log "github.com/hashicorp/go-hclog" - uuid "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/logging" "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/helper/testhelpers" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" - "github.com/hashicorp/vault/shamir" "github.com/hashicorp/vault/vault" + "github.com/kr/pretty" ) func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { @@ -33,125 +30,79 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { vault.TestWaitActive(t, core.Core) client := core.Client ctx := context.Background() - - // Step 1: write something into Identity so that we create storage paths - // and know where to put things - _, err := client.Logical().Write("identity/entity", map[string]interface{}{ - "name": "foobar", - }) - - // Step 2: seal, so we can modify data without Vault - if err := client.Sys().Seal(); err != nil { - t.Fatal(err) - } - - // Step 3: Unseal the barrier so we can write legit stuff into the data - // store - barrierKey, err := shamir.Combine(cluster.BarrierKeys[0:3]) - if err != nil { - t.Fatal(err) - } - if barrierKey == nil { - t.Fatal("nil barrier key") - } - - if core.UnderlyingStorage == nil { - t.Fatal("underlying storage is nil") - } - - barrier, err := vault.NewAESGCMBarrier(core.UnderlyingStorage) - if err != nil { - t.Fatal(err) - } - if barrier == nil { - t.Fatal("nil barrier") - } - - if err := barrier.Unseal(ctx, barrierKey); err != nil { - t.Fatal(err) - } - - // Step 4: Remove exisitng packer data, create a legacy packer, write - // stuff, ensure that all buckets are created. 
- bes, err := barrier.List(ctx, "logical/") - if err != nil { - t.Fatal(err) - } - - if len(bes) > 1 { - t.Fatalf("expected only identity logical area, got %v", bes) - } - - entityPackerLogger := logger.Named("storagepacker").Named("entities") - groupPackerLogger := logger.Named("storagepacker").Named("groups") - storage := logical.NewStorageView(barrier, "logical/"+bes[0]) - numEntries := 10000 - if err := logical.ClearView(ctx, storage); err != nil { - t.Fatal(err) - } + storage := logical.NewLogicalStorage(core.UnderlyingStorage) - entityPacker, err := NewLegacyStoragePacker(ctx, &storagepacker.Config{ - BucketStorageView: storage.SubView("packer/buckets/"), - Logger: entityPackerLogger, - }) - if err != nil { - t.Fatal(err) - } + // Step 1: Seal, so we can swap out the packer creation func + cluster.EnsureCoresSealed(t) - groupPacker, err := NewLegacyStoragePacker(ctx, &storagepacker.Config{ - BucketStorageView: storage.SubView("packer/group/buckets/"), - Logger: groupPackerLogger, - }) - if err != nil { - t.Fatal(err) - } + // Step 2: Start with a legacy packer + vault.StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(NewLegacyStoragePacker)) + + // Step 3: Unseal with legacy, write stuff + testhelpers.EnsureCoresUnsealed(t, cluster) + vault.TestWaitActive(t, core.Core) - var entity identity.Entity - var group identity.Group - var item storagepacker.Item for i := 0; i < numEntries; i++ { - entity.ID, _ = uuid.GenerateUUID() - entity.Name = fmt.Sprintf("%d", i) - entityAsAny, err := ptypes.MarshalAny(&entity) + secret, err := client.Logical().Write("identity/entity", map[string]interface{}{ + "name": fmt.Sprintf("%d", i), + }) if err != nil { t.Fatal(err) } - item.ID = entity.ID - item.Message = entityAsAny - if err := entityPacker.PutItem(ctx, &item); err != nil { - t.Fatal(err) + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatalf("bad name, secret is %s", pretty.Sprint(secret)) } - group.ID, _ = uuid.GenerateUUID() - group.Name = fmt.Sprintf("%d", i) - groupAsAny, err := ptypes.MarshalAny(&group) + secret, err = client.Logical().Write("identity/group", map[string]interface{}{ + "name": fmt.Sprintf("%d", i), + }) if err != nil { t.Fatal(err) } - item.ID = group.ID - item.Message = groupAsAny - if err := groupPacker.PutItem(ctx, &item); err != nil { - t.Fatal(err) + if secret == nil { + t.Fatal("nil secret") + } + if secret.Data["name"] != fmt.Sprintf("%d", i) { + t.Fatal("bad name") } } - buckets, err := barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") + // Step 4: Seal Vault again, check that the values we expect exist, swap to new storage packer + cluster.EnsureCoresSealed(t) + + bes, err := storage.List(ctx, "logical/") + if err != nil { + t.Fatal(err) + } + + if len(bes) > 1 { + t.Fatalf("expected only identity logical area, got %v", bes) + } + + buckets, err := storage.List(ctx, "logical/"+bes[0]+"packer/buckets/") if err != nil { t.Fatal(err) } if len(buckets) != 256 { t.Fatalf("%d", len(buckets)) } + t.Log(buckets) - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") + buckets, err = storage.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") if err != nil { t.Fatal(err) } if len(buckets) != 256 { t.Fatalf("%d", len(buckets)) } + t.Log(buckets) + + vault.StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV1)) // Step 5: Unseal Vault, make sure we can fetch every one of the created // identities, and that storage looks as we 
expect @@ -182,7 +133,7 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { } } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/") + buckets, err = storage.List(ctx, "logical/"+bes[0]+"packer/buckets/") if err != nil { t.Fatal(err) } @@ -190,7 +141,9 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { t.Fatalf("%d", len(buckets)) } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/buckets/v2/") + t.Log(buckets) + + buckets, err = storage.List(ctx, "logical/"+bes[0]+"packer/buckets/v2/") if err != nil { t.Fatal(err) } @@ -198,7 +151,9 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { t.Fatalf("%d", len(buckets)) } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") + t.Log(buckets) + + buckets, err = storage.List(ctx, "logical/"+bes[0]+"packer/group/buckets/") if err != nil { t.Fatal(err) } @@ -206,13 +161,17 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { t.Fatalf("%d", len(buckets)) } - buckets, err = barrier.List(ctx, "logical/"+bes[0]+"packer/group/buckets/v2/") + t.Log(buckets) + + buckets, err = storage.List(ctx, "logical/"+bes[0]+"packer/group/buckets/v2/") if err != nil { t.Fatal(err) } if len(buckets) != 256 { t.Fatalf("%d", len(buckets)) } + + t.Log(buckets) } step5() diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index 2c2dd6218dbd1..990b50f1ac9a7 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -261,7 +261,8 @@ func (i *IdentityStore) handleEntityUpdateCommon() framework.OperationFunc { // Prepare the response respData := map[string]interface{}{ - "id": entity.ID, + "id": entity.ID, + "name": entity.Name, } var aliasIDs []string From 187ba69b8d8c242fdc478f9a98a5ff55be89fbd2 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Mon, 4 Feb 2019 13:19:20 -0500 Subject: [PATCH 21/38] Add some testhelpers and fix some bugs --- helper/testhelpers/testhelpers.go | 99 ++++++++++++++++++++++++++++++- vault/identity_store.go | 38 ++++++++++-- vault/identity_store_util.go | 15 ++++- 3 files changed, 143 insertions(+), 9 deletions(-) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index aff482ec01484..ae0795a5029fe 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -28,8 +28,12 @@ type ReplicatedTestClusters struct { func (r *ReplicatedTestClusters) Cleanup() { r.PerfPrimaryCluster.Cleanup() r.PerfSecondaryCluster.Cleanup() - r.PerfPrimaryDRCluster.Cleanup() - r.PerfSecondaryDRCluster.Cleanup() + if r.PerfPrimaryDRCluster != nil { + r.PerfPrimaryDRCluster.Cleanup() + } + if r.PerfSecondaryDRCluster != nil { + r.PerfSecondaryDRCluster.Cleanup() + } } // Generates a root token on the target cluster. 
@@ -100,6 +104,29 @@ func RandomWithPrefix(name string) string {
 	return fmt.Sprintf("%s-%d", name, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
 }
 
+func EnsureCoresSealed(t testing.T, c *vault.TestCluster) {
+	t.Helper()
+	for _, core := range c.Cores {
+		EnsureCoreSealed(t, core)
+	}
+}
+
+func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) error {
+	client := core.Client
+	client.Sys().Seal()
+	timeout := time.Now().Add(60 * time.Second)
+	for {
+		if time.Now().After(timeout) {
+			return fmt.Errorf("timeout waiting for core to seal")
+		}
+		if core.Core.Sealed() {
+			break
+		}
+		time.Sleep(250 * time.Millisecond)
+	}
+	return nil
+}
+
 func EnsureCoresUnsealed(t testing.T, c *vault.TestCluster) {
 	t.Helper()
 	for _, core := range c.Cores {
@@ -195,6 +222,30 @@ func GetClusterAndCore(t testing.T, logger log.Logger, handlerFunc func(*vault.H
 	return cluster, core
 }
 
+func GetPerfReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerProperties) http.Handler) *ReplicatedTestClusters {
+	ret := &ReplicatedTestClusters{}
+
+	logger := log.New(&log.LoggerOptions{
+		Mutex: &sync.Mutex{},
+		Level: log.Trace,
+	})
+	// Set this lower so that state populates quickly to standby nodes
+	vault.HeartbeatInterval = 2 * time.Second
+
+	ret.PerfPrimaryCluster, _ = GetClusterAndCore(t, logger.Named("perf-pri"), handlerFunc)
+
+	ret.PerfSecondaryCluster, _ = GetClusterAndCore(t, logger.Named("perf-sec"), handlerFunc)
+
+	SetupTwoClusterPerfReplication(t, ret.PerfPrimaryCluster, ret.PerfSecondaryCluster)
+
+	// Wait until poison pills have been read
+	time.Sleep(45 * time.Second)
+	EnsureCoresUnsealed(t, ret.PerfPrimaryCluster)
+	EnsureCoresUnsealed(t, ret.PerfSecondaryCluster)
+
+	return ret
+}
+
 func GetFourReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerProperties) http.Handler) *ReplicatedTestClusters {
 	ret := &ReplicatedTestClusters{}
 
@@ -225,6 +276,46 @@ func GetFourReplicatedClusters(t testing.T, handlerFunc func(*vault.HandlerPrope
 	return ret
 }
 
+func SetupTwoClusterPerfReplication(t testing.T, perfPrimary, perfSecondary *vault.TestCluster) {
+	// Enable performance primary
+	_, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/performance/primary/enable", nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	WaitForReplicationState(t, perfPrimary.Cores[0].Core, consts.ReplicationPerformancePrimary)
+
+	// get performance token
+	secret, err := perfPrimary.Cores[0].Client.Logical().Write("sys/replication/performance/primary/secondary-token", map[string]interface{}{
+		"id": "1",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	token := secret.WrapInfo.Token
+
+	// enable performance secondary
+	secret, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/performance/secondary/enable", map[string]interface{}{
+		"token":   token,
+		"ca_file": perfPrimary.CACertPEMFile,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	WaitForReplicationState(t, perfSecondary.Cores[0].Core, consts.ReplicationPerformanceSecondary)
+	time.Sleep(time.Second * 3)
+	perfSecondary.BarrierKeys = perfPrimary.BarrierKeys
+
+	EnsureCoresUnsealed(t, perfSecondary)
+	rootToken := GenerateRoot(t, perfSecondary, false)
+	perfSecondary.Cores[0].Client.SetToken(rootToken)
+	for _, core := range perfSecondary.Cores {
+		core.Client.SetToken(rootToken)
+	}
+}
+
 func SetupFourClusterReplication(t testing.T, perfPrimary, perfSecondary, perfDRSecondary, perfSecondaryDRSecondary *vault.TestCluster) {
 	// Enable dr primary
 	_, err := 
perfPrimary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil) @@ -289,7 +380,9 @@ func SetupFourClusterReplication(t testing.T, perfPrimary, perfSecondary, perfDR EnsureCoresUnsealed(t, perfSecondary) rootToken := GenerateRoot(t, perfSecondary, false) - perfSecondary.Cores[0].Client.SetToken(rootToken) + for _, core := range perfSecondary.Cores { + core.Client.SetToken(rootToken) + } // Enable dr primary on perf secondary _, err = perfSecondary.Cores[0].Client.Logical().Write("sys/replication/dr/primary/enable", nil) diff --git a/vault/identity_store.go b/vault/identity_store.go index 7ec5669e4e16d..844a7b042957e 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" + "github.com/kr/pretty" ) const ( @@ -127,6 +128,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { switch { // Check if the key is a storage entry key for an entity bucket case strings.HasPrefix(key, i.entityPacker.BucketsView().Prefix()): + i.logger.Trace("found entity bucket for invalidation", "key", key, "prefix", i.entityPacker.BucketsView().Prefix()) bucketKeyHash := i.entityPacker.GetCacheKey(strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) // Create a MemDB transaction @@ -161,7 +163,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.entityPacker.GetBucket(ctx, key) + bucket, err := i.entityPacker.GetBucket(ctx, strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) if err != nil { i.logger.Error("failed to refresh entities", "key", key, "error", err) return @@ -174,9 +176,15 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { // storage entry is non-nil, its an indication of an update. In this // case, entities in the updated bucket needs to be reinserted into // MemDB. + parsedCount := 0 if bucket != nil { + items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) + items = append(items, bucket.Items...) 
for id, message := range bucket.ItemMap { - entity, err := i.parseEntityFromBucketItem(ctx, &storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Message: message}) + } + for _, item := range items { + entity, err := i.parseEntityFromBucketItem(ctx, item) if err != nil { i.logger.Error("failed to parse entity from bucket entry item", "error", err) return @@ -188,14 +196,22 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { i.logger.Error("failed to update entity in MemDB", "error", err) return } + parsedCount++ } } + if parsedCount > 0 { + i.logger.Trace("parsed entities for invalidation", "num_entities", parsedCount) + } else { + i.logger.Error("found no groups", "bucket", pretty.Sprint(bucket)) + } + txn.Commit() return // Check if the key is a storage entry key for an group bucket case strings.HasPrefix(key, i.groupPacker.BucketsView().Prefix()): + i.logger.Trace("found group bucket for invalidation", "key", key, "prefix", i.groupPacker.BucketsView().Prefix()) bucketKeyHash := i.groupPacker.GetCacheKey(strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) // Create a MemDB transaction @@ -218,15 +234,21 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.groupPacker.GetBucket(ctx, key) + bucket, err := i.groupPacker.GetBucket(ctx, strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) if err != nil { i.logger.Error("failed to refresh group", "key", key, "error", err) return } + parsedCount := 0 if bucket != nil { + items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) + items = append(items, bucket.Items...) for id, message := range bucket.ItemMap { - group, err := i.parseGroupFromBucketItem(&storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Message: message}) + } + for _, item := range items { + group, err := i.parseGroupFromBucketItem(item) if err != nil { i.logger.Error("failed to parse group from bucket entry item", "error", err) return @@ -256,9 +278,17 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { i.logger.Error("failed to update group in MemDB", "error", err) return } + + parsedCount++ } } + if parsedCount > 0 { + i.logger.Trace("parsed entities for invalidation", "num_entities", parsedCount) + } else { + i.logger.Error("found no entities", "bucket", pretty.Sprint(bucket)) + } + txn.Commit() return } diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 091d5b77ad3af..a5f7640323249 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -236,8 +236,14 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { continue } + // Need to check both map and Items in case it's during upgrading + items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) + items = append(items, bucket.Items...) for id, message := range bucket.ItemMap { - group, err := i.parseGroupFromBucketItem(&storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Message: message}) + } + for _, item := range items { + group, err := i.parseGroupFromBucketItem(item) if err != nil { return err } @@ -393,8 +399,13 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { continue } + items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) + items = append(items, bucket.Items...) 
for id, message := range bucket.ItemMap { - entity, err := i.parseEntityFromBucketItem(ctx, &storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Message: message}) + } + for _, item := range items { + entity, err := i.parseEntityFromBucketItem(ctx, item) if err != nil { return err } From ad6cf661b2babdeadb6187dd6ffe3d3a59420200 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 5 Feb 2019 12:02:46 -0500 Subject: [PATCH 22/38] Move things over from ent branch --- helper/consts/replication.go | 35 ++++++++++++++- helper/storagepacker/storagepacker.go | 3 +- helper/storagepacker/storagepacker_v1.go | 31 +++++++++++-- helper/testhelpers/testhelpers.go | 6 +-- vault/cluster.go | 6 +++ vault/core.go | 10 ++++- .../storagepacker/legacy_storagepacker.go | 18 +++++++- vault/ha.go | 43 +++++++++++++------ vault/identity_store.go | 10 +++-- vault/identity_store_util.go | 19 ++++---- vault/request_forwarding.go | 2 + vault/testing.go | 7 +++ 12 files changed, 152 insertions(+), 38 deletions(-) diff --git a/helper/consts/replication.go b/helper/consts/replication.go index bdad15522576f..a7e0edea1c562 100644 --- a/helper/consts/replication.go +++ b/helper/consts/replication.go @@ -16,7 +16,7 @@ const ( // ensure no overlap between old and new values. ReplicationUnknown ReplicationState = 0 - ReplicationPerformancePrimary ReplicationState = 1 << iota + ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here! ReplicationPerformanceSecondary OldSplitReplicationBootstrapping ReplicationDRPrimary @@ -51,6 +51,39 @@ func (r ReplicationState) string() string { return "unknown" } +func (r ReplicationState) StateStrings() []string { + var ret []string + if r.HasState(ReplicationPerformanceSecondary) { + ret = append(ret, "perf-secondary") + } + if r.HasState(ReplicationPerformancePrimary) { + ret = append(ret, "perf-primary") + } + if r.HasState(ReplicationPerformanceBootstrapping) { + ret = append(ret, "perf-bootstrapping") + } + if r.HasState(ReplicationPerformanceDisabled) { + ret = append(ret, "perf-disabled") + } + if r.HasState(ReplicationDRPrimary) { + ret = append(ret, "dr-primary") + } + if r.HasState(ReplicationDRSecondary) { + ret = append(ret, "dr-secondary") + } + if r.HasState(ReplicationDRBootstrapping) { + ret = append(ret, "dr-bootstrapping") + } + if r.HasState(ReplicationDRDisabled) { + ret = append(ret, "dr-disabled") + } + if r.HasState(ReplicationPerformanceStandby) { + ret = append(ret, "perfstandby") + } + + return ret +} + func (r ReplicationState) GetDRString() string { switch { case r.HasState(ReplicationDRBootstrapping): diff --git a/helper/storagepacker/storagepacker.go b/helper/storagepacker/storagepacker.go index 5b5c5d2d6b67b..9cfee14283ac9 100644 --- a/helper/storagepacker/storagepacker.go +++ b/helper/storagepacker/storagepacker.go @@ -12,7 +12,8 @@ type StoragePacker interface { BucketsView() *logical.StorageView BucketKeyHashByItemID(string) string GetCacheKey(string) string - GetBucket(context.Context, string) (*LockedBucket, error) + BucketKeys(context.Context) ([]string, error) + GetBucket(context.Context, string, bool) (*LockedBucket, error) DecodeBucket(*logical.StorageEntry) (*LockedBucket, error) PutBucket(context.Context, *LockedBucket) error DeleteBucket(context.Context, string) error diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v1.go index eaad1cc224d2b..9cf9aca754cba 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ 
b/helper/storagepacker/storagepacker_v1.go @@ -130,8 +130,33 @@ func (s *StoragePackerV1) GetCacheKey(key string) string { return strings.Replace(key, "/", "", -1) } +func (s *StoragePackerV1) BucketKeys(ctx context.Context) ([]string, error) { + keys := map[string]struct{}{} + diskBuckets, err := logical.CollectKeys(ctx, s.BucketStorageView) + if err != nil { + return nil, err + } + for _, bucket := range diskBuckets { + keys[bucket] = struct{}{} + } + + s.bucketsCacheLock.RLock() + s.bucketsCache.Walk(func(s string, _ interface{}) bool { + keys[s] = struct{}{} + return false + }) + s.bucketsCacheLock.RUnlock() + + ret := make([]string, 0, len(keys)) + for k := range keys { + ret = append(ret, k) + } + + return ret, nil +} + // Get returns a bucket for a given key -func (s *StoragePackerV1) GetBucket(ctx context.Context, key string) (*LockedBucket, error) { +func (s *StoragePackerV1) GetBucket(ctx context.Context, key string, skipCache bool) (*LockedBucket, error) { cacheKey := s.GetCacheKey(key) if key == "" { @@ -145,7 +170,7 @@ func (s *StoragePackerV1) GetBucket(ctx context.Context, key string) (*LockedBuc _, bucketRaw, found := s.bucketsCache.LongestPrefix(cacheKey) s.bucketsCacheLock.RUnlock() - if found { + if found && !skipCache { ret := bucketRaw.(*LockedBucket) lock.RUnlock() return ret, nil @@ -161,7 +186,7 @@ func (s *StoragePackerV1) GetBucket(ctx context.Context, key string) (*LockedBuc _, bucketRaw, found = s.bucketsCache.LongestPrefix(cacheKey) s.bucketsCacheLock.RUnlock() - if found { + if found && !skipCache { ret := bucketRaw.(*LockedBucket) return ret, nil } diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index ae0795a5029fe..61d2a219ab8b4 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -112,8 +112,7 @@ func EnsureCoresSealed(t testing.T, c *vault.TestCluster) { } func EnsureCoreSealed(t testing.T, core *vault.TestClusterCore) error { - client := core.Client - client.Sys().Seal() + core.Seal(t) timeout := time.Now().Add(60 * time.Second) for { if time.Now().After(timeout) { @@ -163,6 +162,7 @@ func EnsureCoreUnsealed(t testing.T, c *vault.TestCluster, core *vault.TestClust } func EnsureCoreIsPerfStandby(t testing.T, core *vault.TestClusterCore) { + t.Helper() start := time.Now() for { health, err := core.Client.Sys().Health() @@ -173,7 +173,7 @@ func EnsureCoreIsPerfStandby(t testing.T, core *vault.TestClusterCore) { break } time.Sleep(time.Millisecond * 500) - if time.Now().After(start.Add(time.Second * 30)) { + if time.Now().After(start.Add(time.Second * 60)) { t.Fatal("did not become a perf standby") } } diff --git a/vault/cluster.go b/vault/cluster.go index d8497201baa9c..5960c3b5d95aa 100644 --- a/vault/cluster.go +++ b/vault/cluster.go @@ -38,6 +38,12 @@ var ( ErrCannotForward = errors.New("cannot forward request; no connection or address not known") ) +type ClusterLeaderParams struct { + LeaderUUID string + LeaderRedirectAddr string + LeaderClusterAddr string +} + type ReplicatedClusters struct { DR *ReplicatedCluster Performance *ReplicatedCluster diff --git a/vault/core.go b/vault/core.go index 7c4dcdf5eb434..d0402f5841aa7 100644 --- a/vault/core.go +++ b/vault/core.go @@ -345,8 +345,11 @@ type Core struct { clusterLeaderRedirectAddr string // Most recent leader cluster addr clusterLeaderClusterAddr string - // Lock for the cluster leader values - clusterLeaderParamsLock sync.RWMutex + // Lock for the leader values, ensuring we don't run the parts of Leader() + // that change things 
concurrently + leaderParamsLock sync.RWMutex + // Current cluster leader values + clusterLeaderParams *atomic.Value // Info on cluster members clusterPeerClusterAddrsCache *cache.Cache // Stores whether we currently have a server running @@ -595,6 +598,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { allLoggers: conf.AllLoggers, builtinRegistry: conf.BuiltinRegistry, neverBecomeActive: new(uint32), + clusterLeaderParams: new(atomic.Value), } atomic.StoreUint32(c.sealed, 1) @@ -605,6 +609,8 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.localClusterParsedCert.Store((*x509.Certificate)(nil)) c.localClusterPrivateKey.Store((*ecdsa.PrivateKey)(nil)) + c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil)) + c.activeContextCancelFunc.Store((context.CancelFunc)(nil)) if conf.ClusterCipherSuites != "" { diff --git a/vault/external_tests/storagepacker/legacy_storagepacker.go b/vault/external_tests/storagepacker/legacy_storagepacker.go index 778e88f9d54d6..e2eb1e32e2bec 100644 --- a/vault/external_tests/storagepacker/legacy_storagepacker.go +++ b/vault/external_tests/storagepacker/legacy_storagepacker.go @@ -62,8 +62,22 @@ func (s *LegacyStoragePacker) BucketsView() *logical.StorageView { return s.view } +func (s *LegacyStoragePacker) BucketKeys(ctx context.Context) ([]string, error) { + keys, err := logical.CollectKeys(ctx, s.view) + if err != nil { + return nil, err + } + ret := make([]string, 0, len(keys)) + for _, key := range keys { + if !strings.HasPrefix(key, "v2") { + ret = append(ret, key) + } + } + return ret, nil +} + // Get returns a bucket for a given key -func (s *LegacyStoragePacker) GetBucket(ctx context.Context, key string) (*sp2.LockedBucket, error) { +func (s *LegacyStoragePacker) GetBucket(ctx context.Context, key string, _ bool) (*sp2.LockedBucket, error) { if key == "" { return nil, fmt.Errorf("missing bucket key") } @@ -277,7 +291,7 @@ func (s *LegacyStoragePacker) GetItem(ctx context.Context, itemID string) (*sp2. 
bucketPath := s.BucketPath(bucketKey) // Fetch the bucket entry - bucket, err := s.GetBucket(ctx, bucketPath) + bucket, err := s.GetBucket(ctx, bucketPath, false) if err != nil { return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) } diff --git a/vault/ha.go b/vault/ha.go index e9813b9086dbc..b99b20043f132 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -108,28 +108,41 @@ func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err erro return false, "", "", nil } - c.clusterLeaderParamsLock.RLock() - localLeaderUUID := c.clusterLeaderUUID - localRedirAddr := c.clusterLeaderRedirectAddr - localClusterAddr := c.clusterLeaderClusterAddr - c.clusterLeaderParamsLock.RUnlock() + var localLeaderUUID, localRedirectAddr, localClusterAddr string + clusterLeaderParams := c.clusterLeaderParams.Load().(*ClusterLeaderParams) + if clusterLeaderParams != nil { + localLeaderUUID = clusterLeaderParams.LeaderUUID + localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr + localClusterAddr = clusterLeaderParams.LeaderClusterAddr + } // If the leader hasn't changed, return the cached value; nothing changes // mid-leadership, and the barrier caches anyways - if leaderUUID == localLeaderUUID && localRedirAddr != "" { + if leaderUUID == localLeaderUUID && localRedirectAddr != "" { c.stateLock.RUnlock() - return false, localRedirAddr, localClusterAddr, nil + return false, localRedirectAddr, localClusterAddr, nil } c.logger.Trace("found new active node information, refreshing") defer c.stateLock.RUnlock() - c.clusterLeaderParamsLock.Lock() - defer c.clusterLeaderParamsLock.Unlock() + c.leaderParamsLock.Lock() + defer c.leaderParamsLock.Unlock() // Validate base conditions again - if leaderUUID == c.clusterLeaderUUID && c.clusterLeaderRedirectAddr != "" { - return false, localRedirAddr, localClusterAddr, nil + clusterLeaderParams = c.clusterLeaderParams.Load().(*ClusterLeaderParams) + if clusterLeaderParams != nil { + localLeaderUUID = clusterLeaderParams.LeaderUUID + localRedirectAddr = clusterLeaderParams.LeaderRedirectAddr + localClusterAddr = clusterLeaderParams.LeaderClusterAddr + } else { + localLeaderUUID = "" + localRedirectAddr = "" + localClusterAddr = "" + } + + if leaderUUID == localLeaderUUID && localRedirectAddr != "" { + return false, localRedirectAddr, localClusterAddr, nil } key := coreLeaderPrefix + leaderUUID @@ -174,9 +187,11 @@ func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err erro // Don't set these until everything has been parsed successfully or we'll // never try again - c.clusterLeaderRedirectAddr = adv.RedirectAddr - c.clusterLeaderClusterAddr = adv.ClusterAddr - c.clusterLeaderUUID = leaderUUID + c.clusterLeaderParams.Store(&ClusterLeaderParams{ + LeaderUUID: leaderUUID, + LeaderRedirectAddr: adv.RedirectAddr, + LeaderClusterAddr: adv.ClusterAddr, + }) return false, adv.RedirectAddr, adv.ClusterAddr, nil } diff --git a/vault/identity_store.go b/vault/identity_store.go index 844a7b042957e..ce08b5ccaf68e 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -163,12 +163,14 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.entityPacker.GetBucket(ctx, strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) + bucket, err := i.entityPacker.GetBucket(ctx, strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix()), true) if err != nil { i.logger.Error("failed to refresh entities", "key", key, "error", err) return } + 
i.logger.Trace("got bucket to invalidate", "key", key, "bucket_nil", bucket == nil) + // If the underlying entry is nil, it means that this invalidation // notification is for the deletion of the underlying storage entry. At // this point, since all the entities belonging to this bucket are @@ -190,6 +192,8 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { return } + i.logger.Trace("found entity name", "name", entity.Name) + // Only update MemDB and don't touch the storage err = i.upsertEntityInTxn(ctx, txn, entity, nil, false) if err != nil { @@ -201,7 +205,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } if parsedCount > 0 { - i.logger.Trace("parsed entities for invalidation", "num_entities", parsedCount) + i.logger.Trace("parsed entities for invalidation", "key", key, "num_entities", parsedCount) } else { i.logger.Error("found no groups", "bucket", pretty.Sprint(bucket)) } @@ -234,7 +238,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { } // Get the storage bucket entry - bucket, err := i.groupPacker.GetBucket(ctx, strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) + bucket, err := i.groupPacker.GetBucket(ctx, strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix()), true) if err != nil { i.logger.Error("failed to refresh group", "key", key, "error", err) return diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index a5f7640323249..878b1de4cb598 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -33,11 +33,13 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { // Check for the legacy -> v2 upgrade case upgradeLegacyStoragePacker := func(prefix string, packer storagepacker.StoragePacker) error { + c.logger.Trace("checking for identity storage packer upgrade", "prefix", prefix) bucketStorageView := logical.NewStorageView(c.identityStore.view, prefix+"buckets/") vals, err := bucketStorageView.List(ctx, "") if err != nil { return err } + c.logger.Trace("found buckets", "buckets", vals) bucketsToUpgrade := make([]string, 0, 256) for _, val := range vals { if val == "v2/" { @@ -52,11 +54,13 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { packer.SetQueueMode(true) for _, key := range bucketsToUpgrade { + c.logger.Trace("upgrading bucket", "key", key) storageEntry, err := bucketStorageView.Get(ctx, key) if err != nil { return err } if storageEntry == nil { + c.logger.Trace("bucket nil") // Not clear what to do here really, but if there's really // nothing there, nothing to load, so continue continue @@ -66,6 +70,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { return err } // Set to the new prefix + c.logger.Trace("found bucket entries", "num", len(bucket.Items)) for _, item := range bucket.Items { packer.PutItem(ctx, item) } @@ -81,7 +86,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { // here; when the old buckets get removed on the primary, it will then // be essentially a noop here and when memdb tries to expire entries it // will just not find any. I think. 
-		if !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary | consts.ReplicationPerformanceStandby) {
+		if !c.perfStandby && !c.ReplicationState().HasState(consts.ReplicationPerformanceSecondary|consts.ReplicationPerformanceStandby) {
 			if err := packer.FlushQueue(ctx); err != nil {
 				return err
 			}
@@ -148,7 +153,7 @@ func (i *IdentityStore) sanitizeName(name string) string {
 
 func (i *IdentityStore) loadGroups(ctx context.Context) error {
 	i.logger.Debug("identity loading groups")
-	allBuckets, err := logical.CollectKeys(ctx, i.groupPacker.BucketsView())
+	allBuckets, err := i.groupPacker.BucketKeys(ctx)
 	if err != nil {
 		return errwrap.Wrapf("failed to scan for group buckets: {{err}}", err)
 	}
@@ -182,7 +187,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 				return
 			}
 
-			bucket, err := i.groupPacker.GetBucket(ctx, bucketKey)
+			bucket, err := i.groupPacker.GetBucket(ctx, bucketKey, false)
 			if err != nil {
 				errs <- err
 				continue
@@ -263,10 +268,6 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 				}
 			}
 
-			if i.logger.IsDebug() {
-				i.logger.Debug("loading group", "name", group.Name, "id", group.ID)
-			}
-
 			txn := i.db.Txn(true)
 
 			// Before pull#5786, entity memberships in groups were not getting
@@ -311,7 +312,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error {
 func (i *IdentityStore) loadEntities(ctx context.Context) error {
 	// Accumulate existing entities
 	i.logger.Debug("loading entities")
-	allBuckets, err := logical.CollectKeys(ctx, i.entityPacker.BucketsView())
+	allBuckets, err := i.entityPacker.BucketKeys(ctx)
 	if err != nil {
 		return errwrap.Wrapf("failed to scan for entity buckets: {{err}}", err)
 	}
@@ -343,7 +344,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error {
 				return
 			}
 
-			bucket, err := i.entityPacker.GetBucket(ctx, bucketKey)
+			bucket, err := i.entityPacker.GetBucket(ctx, bucketKey, false)
 			if err != nil {
 				errs <- err
 				continue
diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go
index d0fbd2865c95a..ff0eb5fd42bed 100644
--- a/vault/request_forwarding.go
+++ b/vault/request_forwarding.go
@@ -397,6 +397,8 @@ func (c *Core) clearForwardingClients() {
 
 	c.rpcClientConnContext = nil
 	c.rpcForwardingClient = nil
+
+	c.clusterLeaderParams.Store((*ClusterLeaderParams)(nil))
 }
 
 // ForwardRequest forwards a given request to the active node and returns the
diff --git a/vault/testing.go b/vault/testing.go
index 13021068a9dd3..c5e9c4bce2e27 100644
--- a/vault/testing.go
+++ b/vault/testing.go
@@ -789,6 +789,13 @@ func (c *TestCluster) EnsureCoresSealed(t testing.T) {
 	}
 }
 
+func (c *TestClusterCore) Seal(t testing.T) {
+	t.Helper()
+	if err := c.Core.sealInternal(); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func CleanupClusters(clusters []*TestCluster) {
 	wg := &sync.WaitGroup{}
 	for _, cluster := range clusters {

From d9af12c778eb405a0b3c39d3a568a143aa9fa278 Mon Sep 17 00:00:00 2001
From: Jeff Mitchell
Date: Tue, 5 Feb 2019 12:09:02 -0500
Subject: [PATCH 23/38] Remove extra debugging statements

---
 vault/identity_store.go      | 8 +-------
 vault/identity_store_util.go | 9 ++++-----
 2 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/vault/identity_store.go b/vault/identity_store.go
index ce08b5ccaf68e..cd10b4911c7a0 100644
--- a/vault/identity_store.go
+++ b/vault/identity_store.go
@@ -120,7 +120,7 @@ func (i *IdentityStore) paths() []*framework.Path {
 // storage entries that get updated. The value needs to be read and MemDB needs
 // to be updated accordingly.
func (i *IdentityStore) Invalidate(ctx context.Context, key string) { - i.logger.Debug("invalidate notification received", "key", key) + i.logger.Trace("invalidate notification received", "key", key) i.lock.Lock() defer i.lock.Unlock() @@ -128,7 +128,6 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { switch { // Check if the key is a storage entry key for an entity bucket case strings.HasPrefix(key, i.entityPacker.BucketsView().Prefix()): - i.logger.Trace("found entity bucket for invalidation", "key", key, "prefix", i.entityPacker.BucketsView().Prefix()) bucketKeyHash := i.entityPacker.GetCacheKey(strings.TrimPrefix(key, i.entityPacker.BucketsView().Prefix())) // Create a MemDB transaction @@ -169,8 +168,6 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { return } - i.logger.Trace("got bucket to invalidate", "key", key, "bucket_nil", bucket == nil) - // If the underlying entry is nil, it means that this invalidation // notification is for the deletion of the underlying storage entry. At // this point, since all the entities belonging to this bucket are @@ -192,8 +189,6 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { return } - i.logger.Trace("found entity name", "name", entity.Name) - // Only update MemDB and don't touch the storage err = i.upsertEntityInTxn(ctx, txn, entity, nil, false) if err != nil { @@ -215,7 +210,6 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { // Check if the key is a storage entry key for an group bucket case strings.HasPrefix(key, i.groupPacker.BucketsView().Prefix()): - i.logger.Trace("found group bucket for invalidation", "key", key, "prefix", i.groupPacker.BucketsView().Prefix()) bucketKeyHash := i.groupPacker.GetCacheKey(strings.TrimPrefix(key, i.groupPacker.BucketsView().Prefix())) // Create a MemDB transaction diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 878b1de4cb598..22c796b9c1a4e 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -33,13 +33,13 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { // Check for the legacy -> v2 upgrade case upgradeLegacyStoragePacker := func(prefix string, packer storagepacker.StoragePacker) error { - c.logger.Trace("checking for identity storage packer upgrade", "prefix", prefix) + c.logger.Trace("checking for identity storagepacker upgrade", "prefix", prefix) bucketStorageView := logical.NewStorageView(c.identityStore.view, prefix+"buckets/") vals, err := bucketStorageView.List(ctx, "") if err != nil { return err } - c.logger.Trace("found buckets", "buckets", vals) + c.logger.Trace("found buckets to upgrade", "buckets", vals) bucketsToUpgrade := make([]string, 0, 256) for _, val := range vals { if val == "v2/" { @@ -70,7 +70,6 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { return err } // Set to the new prefix - c.logger.Trace("found bucket entries", "num", len(bucket.Items)) for _, item := range bucket.Items { packer.PutItem(ctx, item) } @@ -210,7 +209,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { defer wg.Done() for j, bucketKey := range allBuckets { if j%50 == 0 { - i.logger.Debug("groups buckets loading", "progress", j) + i.logger.Debug("group buckets loading", "progress", j) } select { @@ -367,7 +366,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { defer wg.Done() for j, bucketKey := range allBuckets { if j%50 == 0 { - i.logger.Debug("entities buckets loading", 
"progress", j) + i.logger.Debug("entity buckets loading", "progress", j) } select { From d754c6edbf03455b2516bb378d0d4237f7bdddc0 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 5 Feb 2019 21:07:14 -0500 Subject: [PATCH 24/38] Rename StoragePacker to v2 --- ...toragepacker_v1.go => storagepacker_v2.go} | 38 +- helper/storagepacker/storagepacker_v2.notgo | 534 ------------------ ...er_v1_test.go => storagepacker_v2_test.go} | 10 +- .../storagepacker/storagepacker_v2_test.notgo | 253 --------- .../storagepacker_upgrade_test.go | 2 +- vault/identity_store.go | 2 +- 6 files changed, 26 insertions(+), 813 deletions(-) rename helper/storagepacker/{storagepacker_v1.go => storagepacker_v2.go} (93%) delete mode 100644 helper/storagepacker/storagepacker_v2.notgo rename helper/storagepacker/{storagepacker_v1_test.go => storagepacker_v2_test.go} (93%) delete mode 100644 helper/storagepacker/storagepacker_v2_test.notgo diff --git a/helper/storagepacker/storagepacker_v1.go b/helper/storagepacker/storagepacker_v2.go similarity index 93% rename from helper/storagepacker/storagepacker_v1.go rename to helper/storagepacker/storagepacker_v2.go index 9cf9aca754cba..344aeb18e9934 100644 --- a/helper/storagepacker/storagepacker_v1.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -61,7 +61,7 @@ type Config struct { // number of items. This is the second version of the utility which supports // indefinitely expanding the capacity of the storage by sharding the buckets // when they exceed the imposed limit. -type StoragePackerV1 struct { +type StoragePackerV2 struct { *Config storageLocks []*locksutil.LockEntry bucketsCache *radix.Tree @@ -85,11 +85,11 @@ type LockedBucket struct { *Bucket } -func (s *StoragePackerV1) BucketsView() *logical.StorageView { +func (s *StoragePackerV2) BucketsView() *logical.StorageView { return s.BucketStorageView } -func (s *StoragePackerV1) BucketStorageKeyForItemID(itemID string) string { +func (s *StoragePackerV2) BucketStorageKeyForItemID(itemID string) string { hexVal := hex.EncodeToString(cryptoutil.Blake2b256Hash(itemID)) s.bucketsCacheLock.RLock() @@ -122,15 +122,15 @@ func (s *StoragePackerV1) BucketStorageKeyForItemID(itemID string) string { return cacheKey } -func (s *StoragePackerV1) BucketKeyHashByItemID(itemID string) string { +func (s *StoragePackerV2) BucketKeyHashByItemID(itemID string) string { return s.GetCacheKey(s.BucketStorageKeyForItemID(itemID)) } -func (s *StoragePackerV1) GetCacheKey(key string) string { +func (s *StoragePackerV2) GetCacheKey(key string) string { return strings.Replace(key, "/", "", -1) } -func (s *StoragePackerV1) BucketKeys(ctx context.Context) ([]string, error) { +func (s *StoragePackerV2) BucketKeys(ctx context.Context) ([]string, error) { keys := map[string]struct{}{} diskBuckets, err := logical.CollectKeys(ctx, s.BucketStorageView) if err != nil { @@ -156,7 +156,7 @@ func (s *StoragePackerV1) BucketKeys(ctx context.Context) ([]string, error) { } // Get returns a bucket for a given key -func (s *StoragePackerV1) GetBucket(ctx context.Context, key string, skipCache bool) (*LockedBucket, error) { +func (s *StoragePackerV2) GetBucket(ctx context.Context, key string, skipCache bool) (*LockedBucket, error) { cacheKey := s.GetCacheKey(key) if key == "" { @@ -215,7 +215,7 @@ func (s *StoragePackerV1) GetBucket(ctx context.Context, key string, skipCache b // NOTE: Don't put inserting into the cache here, as that will mess with // upgrade cases for the identity store as we want to keep the bucket out of // the cache until we actually 
re-store it. -func (s *StoragePackerV1) DecodeBucket(storageEntry *logical.StorageEntry) (*LockedBucket, error) { +func (s *StoragePackerV2) DecodeBucket(storageEntry *logical.StorageEntry) (*LockedBucket, error) { uncompressedData, notCompressed, err := compressutil.Decompress(storageEntry.Value) if err != nil { return nil, errwrap.Wrapf("failed to decompress packed storage entry: {{err}}", err) @@ -239,7 +239,7 @@ func (s *StoragePackerV1) DecodeBucket(storageEntry *logical.StorageEntry) (*Loc } // Put stores a packed bucket entry -func (s *StoragePackerV1) PutBucket(ctx context.Context, bucket *LockedBucket) error { +func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -269,7 +269,7 @@ func (s *StoragePackerV1) PutBucket(ctx context.Context, bucket *LockedBucket) e } // storeBucket actually stores the bucket. It expects that it's already locked. -func (s *StoragePackerV1) storeBucket(ctx context.Context, bucket *LockedBucket) error { +func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket) error { if atomic.LoadUint32(&s.queueMode) == 1 { s.queuedBuckets.Store(bucket.Key, bucket) return nil @@ -300,7 +300,7 @@ func (s *StoragePackerV1) storeBucket(ctx context.Context, bucket *LockedBucket) } // DeleteBucket deletes an entire bucket entry -func (s *StoragePackerV1) DeleteBucket(ctx context.Context, key string) error { +func (s *StoragePackerV2) DeleteBucket(ctx context.Context, key string) error { if key == "" { return fmt.Errorf("missing key") } @@ -347,7 +347,7 @@ func (s *LockedBucket) upsert(item *Item) error { // DeleteItem removes the storage entry which the given key refers to from its // corresponding bucket. -func (s *StoragePackerV1) DeleteItem(ctx context.Context, itemID string) error { +func (s *StoragePackerV2) DeleteItem(ctx context.Context, itemID string) error { if itemID == "" { return fmt.Errorf("empty item ID") } @@ -406,7 +406,7 @@ func (s *StoragePackerV1) DeleteItem(ctx context.Context, itemID string) error { // GetItem fetches the storage entry for a given key from its corresponding // bucket. 
-func (s *StoragePackerV1) GetItem(ctx context.Context, itemID string) (*Item, error) { +func (s *StoragePackerV2) GetItem(ctx context.Context, itemID string) (*Item, error) { if itemID == "" { return nil, fmt.Errorf("empty item ID") } @@ -467,7 +467,7 @@ func (s *StoragePackerV1) GetItem(ctx context.Context, itemID string) (*Item, er } // PutItem stores a storage entry in its corresponding bucket -func (s *StoragePackerV1) PutItem(ctx context.Context, item *Item) error { +func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { if item == nil { return fmt.Errorf("nil item") } @@ -528,8 +528,8 @@ func (s *StoragePackerV1) PutItem(ctx context.Context, item *Item) error { return s.storeBucket(ctx, bucket) } -// NewStoragePackerV1 creates a new storage packer for a given view -func NewStoragePackerV1(ctx context.Context, config *Config) (StoragePacker, error) { +// NewStoragePackerV2 creates a new storage packer for a given view +func NewStoragePackerV2(ctx context.Context, config *Config) (StoragePacker, error) { if config.BucketStorageView == nil { return nil, fmt.Errorf("nil buckets view") } @@ -605,7 +605,7 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (StoragePacker, err } // Create a new packer object for the given view - packer := &StoragePackerV1{ + packer := &StoragePackerV2{ Config: config, bucketsCache: radix.New(), storageLocks: locksutil.CreateLocks(), @@ -614,7 +614,7 @@ func NewStoragePackerV1(ctx context.Context, config *Config) (StoragePacker, err return packer, nil } -func (s *StoragePackerV1) SetQueueMode(enabled bool) { +func (s *StoragePackerV2) SetQueueMode(enabled bool) { if enabled { atomic.StoreUint32(&s.queueMode, 1) } else { @@ -622,7 +622,7 @@ func (s *StoragePackerV1) SetQueueMode(enabled bool) { } } -func (s *StoragePackerV1) FlushQueue(ctx context.Context) error { +func (s *StoragePackerV2) FlushQueue(ctx context.Context) error { var err *multierror.Error s.queuedBuckets.Range(func(key, value interface{}) bool { lErr := s.storeBucket(ctx, value.(*LockedBucket)) diff --git a/helper/storagepacker/storagepacker_v2.notgo b/helper/storagepacker/storagepacker_v2.notgo deleted file mode 100644 index 83b96e04c180d..0000000000000 --- a/helper/storagepacker/storagepacker_v2.notgo +++ /dev/null @@ -1,534 +0,0 @@ -package storagepacker - -import ( - "context" - "fmt" - "math" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/hashicorp/errwrap" - "github.com/hashicorp/vault/helper/strutil" - - "github.com/hashicorp/vault/helper/cryptoutil" - "github.com/hashicorp/vault/logical" -) - -// Clone creates a replica of the bucket -func (b *BucketV2) Clone() (*BucketV2, error) { - if b == nil { - return nil, fmt.Errorf("nil bucket") - } - - marshaledBucket, err := proto.Marshal(b) - if err != nil { - return nil, fmt.Errorf("failed to marshal bucket: %v", err) - } - - var clonedBucket BucketV2 - err = proto.Unmarshal(marshaledBucket, &clonedBucket) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal bucket: %v", err) - } - - return &clonedBucket, nil -} - -// Get reads a bucket from the storage -func (s *StoragePackerV2) GetBucket(key string) (*LockedBucket, error) { - if key == "" { - return nil, fmt.Errorf("missing bucket key") - } - - raw, exists := s.bucketsCache.Get(key) - if exists { - return raw.(*LockedBucket), nil - } - - // Read from the underlying view - entry, err := s.config.View.Get(context.Background(), key) - if err != nil { - return nil, errwrap.Wrapf("failed to read bucket: {{err}}", err) - 
} - if entry == nil { - return nil, nil - } - - var bucket BucketV2 - err = proto.Unmarshal(entry.Value, &bucket) - if err != nil { - return nil, errwrap.Wrapf("failed to decode bucket: {{err}}", err) - } - - // Serializing and deserializing a proto message with empty map translates - // to a nil. Ensure that the required fields are initialized properly. - if bucket.Buckets == nil { - bucket.Buckets = make(map[string]*BucketV2) - } - if bucket.Items == nil { - bucket.Items = make(map[string]*Item) - } - - // Update the unencrypted size of the bucket - bucket.Size = int64(len(entry.Value)) - - lb := &LockedBucket{ - BucketV2: &bucket, - } - s.bucketsCache.Insert(bucket.Key, lb) - - return lb, nil -} - -// Put stores a bucket in storage -func (s *StoragePackerV2) PutBucket(bucket *LockedBucket) error { - if bucket == nil { - return fmt.Errorf("nil bucket entry") - } - - if bucket.Key == "" { - return fmt.Errorf("missing bucket key") - } - - if !strings.HasPrefix(bucket.Key, s.config.ViewPrefix) { - return fmt.Errorf("bucket entry key should have %q prefix", s.config.ViewPrefix) - } - - marshaledBucket, err := proto.Marshal(bucket.BucketV2) - if err != nil { - return err - } - - err = s.config.View.Put(context.Background(), &logical.StorageEntry{ - Key: bucket.Key, - Value: marshaledBucket, - }) - if err != nil { - return err - } - - bucket.Size = int64(len(marshaledBucket)) - - s.bucketsCache.Insert(bucket.Key, bucket) - - return nil -} - -// putItem is a recursive function that finds the appropriate bucket -// to store the item based on the storage space available in the buckets. -func (s *StoragePackerV2) putItem(bucket *LockedBucket, item *Item, depth int) (string, error) { - // Bucket will be nil for the first time when its not known which base - // level bucket the item belongs to. - if bucket == nil { - // Enforce zero depth - depth = 0 - - // Compute the index of the base bucket - baseIndex, err := s.baseBucketIndex(item.ID) - if err != nil { - return "", err - } - - // Prepend the index with the prefix - baseKey := s.config.ViewPrefix + baseIndex - - // Check if the base bucket exists - bucket, err = s.GetBucket(baseKey) - if err != nil { - return "", err - } - - // If the base bucket does not exist, create one - if bucket == nil { - bucket = s.newBucket(baseKey) - } - } - - // Compute the shard index to which the item belongs - shardIndex, err := s.shardBucketIndex(item.ID, depth) - if err != nil { - return "", errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) - } - shardKey := bucket.Key + "/" + shardIndex - - // Acquire lock on the bucket - bucket.lock.Lock() - - if bucket.Sharded { - // If the bucket is already sharded out, release the lock and continue - // insertion at the next level. - bucket.lock.Unlock() - shardedBucket, err := s.GetBucket(shardKey) - if err != nil { - return "", err - } - if shardedBucket == nil { - shardedBucket = s.newBucket(shardKey) - } - return s.putItem(shardedBucket, item, depth+1) - } - - // From this point on, the item may get inserted either in the current - // bucket or at its next level. In both cases, there will be a need to - // persist the current bucket. Hence the lock on the current bucket is - // deferred. - defer bucket.lock.Unlock() - - // Check if a bucket shard is already present for the shard index. If not, - // create one. 
- bucketShard, ok := bucket.Buckets[shardIndex] - if !ok { - bucketShard = s.newBucket(shardKey).BucketV2 - bucket.Buckets[shardIndex] = bucketShard - } - - // Check if the insertion of the item makes the bucket size exceed the - // limit. - exceedsLimit, err := s.bucketExceedsSizeLimit(bucket, item) - if err != nil { - return "", err - } - - // If the bucket size after addition of the item doesn't exceed the limit, - // insert the item persist the bucket. - if !exceedsLimit { - bucketShard.Items[item.ID] = item - return bucket.Key, s.PutBucket(bucket) - } - - // The bucket size after addition of the item exceeds the size limit. Split - // the bucket into shards. - err = s.splitBucket(bucket, depth) - if err != nil { - return "", err - } - - shardedBucket, err := s.GetBucket(bucketShard.Key) - if err != nil { - return "", err - } - - bucketKey, err := s.putItem(shardedBucket, item, depth+1) - if err != nil { - return "", err - } - - return bucketKey, s.PutBucket(bucket) -} - -// getItem is a recursive function that fetches the given item ID in -// the bucket hierarchy -func (s *StoragePackerV2) getItem(bucket *LockedBucket, itemID string, depth int) (*Item, error) { - if bucket == nil { - // Enforce zero depth - depth = 0 - - baseIndex, err := s.baseBucketIndex(itemID) - if err != nil { - return nil, err - } - - bucket, err = s.GetBucket(s.config.ViewPrefix + baseIndex) - if err != nil { - return nil, errwrap.Wrapf("failed to read packed storage item: {{err}}", err) - } - } - - if bucket == nil { - return nil, nil - } - - shardIndex, err := s.shardBucketIndex(itemID, depth) - if err != nil { - return nil, errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) - } - - shardKey := bucket.Key + "/" + shardIndex - - bucket.lock.RLock() - - if bucket.Sharded { - bucket.lock.RUnlock() - shardedBucket, err := s.GetBucket(shardKey) - if err != nil { - return nil, err - } - if shardedBucket == nil { - return nil, nil - } - return s.getItem(shardedBucket, itemID, depth+1) - } - - defer bucket.lock.RUnlock() - - bucketShard, ok := bucket.Buckets[shardIndex] - if !ok { - return nil, nil - } - - if bucketShard == nil { - return nil, nil - } - - return bucketShard.Items[itemID], nil -} - -// deleteItem is a recursive function that finds the bucket holding -// the item and removes the item from it -func (s *StoragePackerV2) deleteItem(bucket *LockedBucket, itemID string, depth int) error { - if bucket == nil { - // Enforce zero depth - depth = 0 - - baseIndex, err := s.baseBucketIndex(itemID) - if err != nil { - return err - } - - bucket, err = s.GetBucket(s.config.ViewPrefix + baseIndex) - if err != nil { - return errwrap.Wrapf("failed to read packed storage item: {{err}}", err) - } - } - - if bucket == nil { - return nil - } - - shardIndex, err := s.shardBucketIndex(itemID, depth) - if err != nil { - return errwrap.Wrapf("failed to compute the bucket shard index: {{err}}", err) - } - - shardKey := bucket.Key + "/" + shardIndex - - bucket.lock.Lock() - - if bucket.Sharded { - bucket.lock.Unlock() - shardedBucket, err := s.GetBucket(shardKey) - if err != nil { - return err - } - if shardedBucket == nil { - return nil - } - return s.deleteItem(shardedBucket, itemID, depth+1) - } - - defer bucket.lock.Unlock() - - bucketShard, ok := bucket.Buckets[shardIndex] - if !ok { - return nil - } - - if bucketShard == nil { - return nil - } - - delete(bucketShard.Items, itemID) - - return s.PutBucket(bucket) -} - -// GetItem fetches the item using the given item identifier -func (s *StoragePackerV2) 
GetItem(itemID string) (*Item, error) { - if itemID == "" { - return nil, fmt.Errorf("empty item ID") - } - - return s.getItem(nil, itemID, 0) -} - -// PutItem persists the given item -func (s *StoragePackerV2) PutItem(item *Item) (string, error) { - if item == nil { - return "", fmt.Errorf("nil item") - } - - if item.ID == "" { - return "", fmt.Errorf("missing ID in item") - } - - bucketKey, err := s.putItem(nil, item, 0) - if err != nil { - return "", err - } - - return bucketKey, nil -} - -// DeleteItem removes the item using the given item identifier -func (s *StoragePackerV2) DeleteItem(itemID string) error { - if itemID == "" { - return fmt.Errorf("empty item ID") - } - - return s.deleteItem(nil, itemID, 0) -} - -// bucketExceedsSizeLimit computes if the given bucket is exceeding the -// configured size limit on the storage packer -func (s *StoragePackerV2) bucketExceedsSizeLimit(bucket *LockedBucket, item *Item) (bool, error) { - marshaledItem, err := proto.Marshal(item) - if err != nil { - return false, fmt.Errorf("failed to marshal item: %v", err) - } - - expectedBucketSize := bucket.Size + int64(len(marshaledItem)) - - // The objects that leave storage packer to get persisted get inflated due - // to extra bits coming off of encryption. So, we consider the bucket to be - // full much earlier to compensate for the encryption overhead. Testing - // with the threshold of 70% of the max size resulted in object sizes - // coming dangerously close to the actual limit. Hence, setting 60% as the - // cut-off value. This is purely a heuristic threshold. - max := math.Ceil((float64(s.config.BucketMaxSize) * float64(60)) / float64(100)) - - return float64(expectedBucketSize) > max, nil -} - -func (s *StoragePackerV2) splitBucket(bucket *LockedBucket, depth int) error { - for _, shard := range bucket.Buckets { - for itemID, item := range shard.Items { - if shard.Buckets == nil { - shard.Buckets = make(map[string]*BucketV2) - } - subShardIndex, err := s.shardBucketIndex(itemID, depth+1) - if err != nil { - return err - } - subShard, ok := shard.Buckets[subShardIndex] - if !ok { - subShardKey := shard.Key + "/" + subShardIndex - subShard = s.newBucket(subShardKey).BucketV2 - shard.Buckets[subShardIndex] = subShard - } - subShard.Items[itemID] = item - } - - shard.Items = nil - err := s.PutBucket(&LockedBucket{BucketV2: shard}) - if err != nil { - return err - } - } - bucket.Buckets = nil - bucket.Sharded = true - return nil -} - -// baseBucketIndex returns the index of the base bucket to which the -// given item belongs -func (s *StoragePackerV2) baseBucketIndex(itemID string) (string, error) { - // Hash the item ID - hashVal, err := cryptoutil.Blake2b256Hash(itemID) - if err != nil { - return "", err - } - - // Extract the index value of the base bucket from the hash of the item ID - return strutil.BitMaskedIndexHex(hashVal, bitsNeeded(s.config.BucketBaseCount)) -} - -// shardBucketIndex returns the index of the bucket shard to which the given -// item belongs at a particular depth. -func (s *StoragePackerV2) shardBucketIndex(itemID string, depth int) (string, error) { - // Hash the item ID - hashVal, err := cryptoutil.Blake2b256Hash(itemID) - if err != nil { - return "", err - } - - // Compute the bits required to enumerate base buckets - shardsBitCount := bitsNeeded(s.config.BucketShardCount) - - // Compute the bits that are already consumed by the base bucket and the - // shards at previous levels. 
- ignoreBits := bitsNeeded(s.config.BucketBaseCount) + depth*shardsBitCount - - // Extract the index value of the bucket shard from the hash of the item ID - return strutil.BitMaskedIndexHex(hashVal[ignoreBits:], shardsBitCount) -} - -// bitsNeeded returns the minimum number of bits required to enumerate the -// natural numbers below the given value -func bitsNeeded(value int) int { - if value < 2 { - return 1 - } - bitCount := int(math.Ceil(math.Log2(float64(value)))) - if isPowerOfTwo(value) { - bitCount++ - } - return bitCount -} - -// isPowerOfTwo returns true if the given value is a power of two, false -// otherwise. -func isPowerOfTwo(val int) bool { - return val != 0 && (val&(val-1) == 0) -} - -func (s *StoragePackerV2) newBucket(key string) *LockedBucket { - return &LockedBucket{ - BucketV2: &BucketV2{ - Key: key, - Buckets: make(map[string]*BucketV2), - Items: make(map[string]*Item), - }, - } -} - -type WalkFunc func(item *Item) error - -// Walk traverses through all the buckets and all the items in each bucket and -// invokes the given function on each item. -func (s *StoragePackerV2) Walk(fn WalkFunc) error { - var err error - for base := 0; base < s.config.BucketBaseCount; base++ { - baseKey := s.config.ViewPrefix + strconv.FormatInt(int64(base), 16) - err = s.bucketWalk(baseKey, fn) - if err != nil { - return err - } - } - return nil -} - -// bucketWalk is a pre-order traversal of the bucket hierarchy starting from -// the bucket corresponding to the given key. The function fn will be called on -// all the items in the hierarchy. -func (s *StoragePackerV2) bucketWalk(key string, fn WalkFunc) error { - bucket, err := s.GetBucket(key) - if err != nil { - return err - } - if bucket == nil { - return nil - } - - if !bucket.Sharded { - for _, b := range bucket.Buckets { - for _, item := range b.Items { - err := fn(item) - if err != nil { - return err - } - } - } - return nil - } - - for i := 0; i < s.config.BucketShardCount; i++ { - shardKey := bucket.Key + "/" + strconv.FormatInt(int64(i), 16) - err = s.bucketWalk(shardKey, fn) - if err != nil { - return err - } - } - - return nil -} diff --git a/helper/storagepacker/storagepacker_v1_test.go b/helper/storagepacker/storagepacker_v2_test.go similarity index 93% rename from helper/storagepacker/storagepacker_v1_test.go rename to helper/storagepacker/storagepacker_v2_test.go index b9bde4fcfe8a1..48ec063fe11f7 100644 --- a/helper/storagepacker/storagepacker_v1_test.go +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -13,10 +13,10 @@ import ( "github.com/hashicorp/vault/logical" ) -func getStoragePacker(tb testing.TB) *StoragePackerV1 { +func getStoragePacker(tb testing.TB) *StoragePackerV2 { storage := &logical.InmemStorage{} storageView := logical.NewStorageView(storage, "packer/buckets/v2") - storagePacker, err := NewStoragePackerV1(context.Background(), &Config{ + storagePacker, err := NewStoragePackerV2(context.Background(), &Config{ BucketStorageView: storageView, ConfigStorageView: logical.NewStorageView(storage, "packer/config"), Logger: log.New(&log.LoggerOptions{Name: "storagepackertest"}), @@ -27,7 +27,7 @@ func getStoragePacker(tb testing.TB) *StoragePackerV1 { return storagePacker } -func BenchmarkStoragePackerV1(b *testing.B) { +func BenchmarkStoragePackerV2(b *testing.B) { storagePacker := getStoragePacker(b) for i := 0; i < b.N; i++ { @@ -73,7 +73,7 @@ func BenchmarkStoragePackerV1(b *testing.B) { } } -func TestStoragePackerV1(t *testing.T) { +func TestStoragePackerV2(t *testing.T) { storagePacker := 
getStoragePacker(t) // Persist a storage entry @@ -116,7 +116,7 @@ func TestStoragePackerV1(t *testing.T) { } } -func TestStoragePackerV1_SerializeDeserializeComplexItem_Version1(t *testing.T) { +func TestStoragePackerV2_SerializeDeserializeComplexItem_Version1(t *testing.T) { storagePacker := getStoragePacker(t) timeNow := ptypes.TimestampNow() diff --git a/helper/storagepacker/storagepacker_v2_test.notgo b/helper/storagepacker/storagepacker_v2_test.notgo deleted file mode 100644 index 9374408ba0167..0000000000000 --- a/helper/storagepacker/storagepacker_v2_test.notgo +++ /dev/null @@ -1,253 +0,0 @@ -package storagepacker - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "testing" - - "github.com/golang/protobuf/ptypes" - "github.com/hashicorp/vault/helper/identity" - "github.com/hashicorp/vault/helper/logging" - "github.com/hashicorp/vault/logical" - - log "github.com/hashicorp/go-hclog" -) - -const ( - testIterationCount = 5000 - //testBucketBaseCount = defaultBucketBaseCount - //testBucketShardCount = defaultBucketShardCount - testBucketMaxSize = defaultBucketMaxSize - - testBucketBaseCount = 1 - testBucketShardCount = 2 -) - -func TestStoragePacker_bitsNeeded(t *testing.T) { - testData := map[int]int{ - -1: 1, - 0: 1, - 1: 1, - 2: 2, - 3: 2, - 4: 3, - 7: 3, - 8: 4, - 15: 4, - 16: 5, - 25: 5, - 32: 6, - 64: 7, - } - for value, expected := range testData { - if bitsNeeded(value) != expected { - t.Fatalf("expected bit count of %d for %d", expected, value) - } - } -} - -func TestStoragePackerV2_Walk(t *testing.T) { - sp, err := NewStoragePackerV2(&Config{ - BucketBaseCount: testBucketBaseCount, - BucketShardCount: testBucketShardCount, - BucketMaxSize: testBucketMaxSize, - View: &logical.InmemStorage{}, - Logger: logging.NewVaultLogger(log.Trace), - }) - if err != nil { - t.Fatal(err) - } - - entity := &identity.Entity{ - Metadata: map[string]string{ - "samplekey1": "samplevalue1", - "samplekey2": "samplevalue2", - "samplekey3": "samplevalue3", - "samplekey4": "samplevalue4", - "samplekey5": "samplevalue5", - }, - } - - testPutItem(t, sp, entity) - - collected := []string{} - - walkFunc := func(item *Item) error { - collected = append(collected, item.ID) - return nil - } - - sp.Walk(walkFunc) - if len(collected) != testIterationCount { - t.Fatalf("unable to walk on all the items in the packer") - } -} - -func TestStoragePackerV2_Inmem(t *testing.T) { - sp, err := NewStoragePackerV2(&Config{ - BucketBaseCount: testBucketBaseCount, - BucketShardCount: testBucketShardCount, - BucketMaxSize: testBucketMaxSize, - View: &logical.InmemStorage{}, - Logger: logging.NewVaultLogger(log.Trace), - }) - if err != nil { - t.Fatal(err) - } - - entity := &identity.Entity{ - Metadata: map[string]string{ - "samplekey1": "samplevalue1", - "samplekey2": "samplevalue2", - "samplekey3": "samplevalue3", - "samplekey4": "samplevalue4", - "samplekey5": "samplevalue5", - }, - } - testPutItem(t, sp, entity) - testGetItem(t, sp, false) - testDeleteItem(t, sp) - testGetItem(t, sp, true) -} - -func TestStoragePackerV2_File(t *testing.T) { - filePath, err := ioutil.TempDir("", "vault") - if err != nil { - t.Fatalf("err: %s", err) - } - //fmt.Printf("filePath: %q\n", filePath) - defer os.RemoveAll(filePath) - - logger := logging.NewVaultLogger(log.Trace) - - config := map[string]string{ - "path": filePath, - } - - storage, err := logical.NewLogicalStorage(logical.LogicalTypeFile, config, logger) - if err != nil { - t.Fatal(err) - } - - sp, err := NewStoragePackerV2(&Config{ - BucketBaseCount: testBucketBaseCount, - 
BucketShardCount: testBucketShardCount, - BucketMaxSize: testBucketMaxSize, - View: storage, - Logger: logger, - }) - if err != nil { - t.Fatal(err) - } - - entity := &identity.Entity{ - Metadata: map[string]string{ - "samplekey1": "samplevalue1", - "samplekey2": "samplevalue2", - "samplekey3": "samplevalue3", - "samplekey4": "samplevalue4", - "samplekey5": "samplevalue5", - }, - } - - testPutItem(t, sp, entity) - testGetItem(t, sp, false) - testDeleteItem(t, sp) - testGetItem(t, sp, true) -} - -func TestStoragePackerV2_isPowerOfTwo(t *testing.T) { - powersOfTwo := []int{1, 2, 4, 1024, 4096} - notPowersOfTwo := []int{0, 3, 5, 1000, 1023, 4095, 4097, 10000} - for _, val := range powersOfTwo { - if !isPowerOfTwo(val) { - t.Fatalf("%d is a power of two", val) - } - } - for _, val := range notPowersOfTwo { - if isPowerOfTwo(val) { - t.Fatalf("%d is not a power of two", val) - } - } -} - -func testPutItem(t *testing.T, sp *StoragePackerV2, entity *identity.Entity) { - t.Helper() - for i := 1; i <= testIterationCount; i++ { - if i%500 == 0 { - fmt.Printf("put item iteration: %d\n", i) - } - id := strconv.Itoa(i) - entity.ID = id - - marshaledMessage, err := ptypes.MarshalAny(entity) - if err != nil { - t.Fatal(err) - } - - item := &Item{ - ID: id, - Message: marshaledMessage, - } - if err != nil { - t.Fatal(err) - } - - _, err = sp.PutItem(item) - if err != nil { - t.Fatal(err) - } - } -} - -func testGetItem(t *testing.T, sp *StoragePackerV2, expectNil bool) { - t.Helper() - for i := 1; i <= testIterationCount; i++ { - if i%500 == 0 { - fmt.Printf("get item iteration: %d\n", i) - } - id := strconv.Itoa(i) - - itemFetched, err := sp.GetItem(id) - if err != nil { - t.Fatal(err) - } - - switch expectNil { - case itemFetched == nil: - continue - default: - t.Fatalf("expected nil for item %q\n", id) - } - - if itemFetched == nil { - t.Fatalf("failed to read the inserted item %q", id) - } - - var fetchedMessage identity.Entity - err = ptypes.UnmarshalAny(itemFetched.Message, &fetchedMessage) - if err != nil { - t.Fatal(err) - } - - if fetchedMessage.ID != id { - t.Fatalf("failed to fetch item ID: %q\n", id) - } - } -} - -func testDeleteItem(t *testing.T, sp *StoragePackerV2) { - t.Helper() - for i := 1; i <= testIterationCount; i++ { - if i%500 == 0 { - fmt.Printf("delete item iteration: %d\n", i) - } - id := strconv.Itoa(i) - err := sp.DeleteItem(id) - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go index dd057cfc09139..7a1305f37aa80 100644 --- a/vault/external_tests/storagepacker/storagepacker_upgrade_test.go +++ b/vault/external_tests/storagepacker/storagepacker_upgrade_test.go @@ -102,7 +102,7 @@ func TestIdentityStore_StoragePacker_UpgradeFromLegacy(t *testing.T) { } t.Log(buckets) - vault.StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV1)) + vault.StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV2)) // Step 5: Unseal Vault, make sure we can fetch every one of the created // identities, and that storage looks as we expect diff --git a/vault/identity_store.go b/vault/identity_store.go index cd10b4911c7a0..ea5c1f12163cc 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -33,7 +33,7 @@ var ( ) func init() { - StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV1)) + 
StoragePackerCreationFunc.Store(storagepacker.StoragePackerFactory(storagepacker.NewStoragePackerV2)) } func (c *Core) IdentityStore() *IdentityStore { From ba91e9f04961f1f9ffcc3e73158a766156f5ac78 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 5 Feb 2019 21:11:26 -0500 Subject: [PATCH 25/38] Remove some debug output --- vault/identity_store.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/vault/identity_store.go b/vault/identity_store.go index ea5c1f12163cc..6ead72534ea4b 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -17,7 +17,6 @@ import ( "github.com/hashicorp/vault/helper/strutil" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/logical/framework" - "github.com/kr/pretty" ) const ( @@ -202,7 +201,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { if parsedCount > 0 { i.logger.Trace("parsed entities for invalidation", "key", key, "num_entities", parsedCount) } else { - i.logger.Error("found no groups", "bucket", pretty.Sprint(bucket)) + i.logger.Error("found no groups") } txn.Commit() @@ -284,7 +283,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { if parsedCount > 0 { i.logger.Trace("parsed entities for invalidation", "num_entities", parsedCount) } else { - i.logger.Error("found no entities", "bucket", pretty.Sprint(bucket)) + i.logger.Error("found no entities") } txn.Commit() From 05ace97b7a7628656af4ee63caaa542df0351ada Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 7 Feb 2019 14:35:45 -0500 Subject: [PATCH 26/38] Add ErrValueTooLarge to physical --- physical/consul/consul.go | 11 ++++- physical/consul/consul_test.go | 76 ++++++++++++++++++++++++++++++++++ physical/physical.go | 4 ++ 3 files changed, 90 insertions(+), 1 deletion(-) diff --git a/physical/consul/consul.go b/physical/consul/consul.go index 0ad84dad896dc..06203fd4e3b3f 100644 --- a/physical/consul/consul.go +++ b/physical/consul/consul.go @@ -402,6 +402,9 @@ func (c *ConsulBackend) Transaction(ctx context.Context, txns []*physical.TxnEnt ok, resp, _, err := c.kv.Txn(ops, queryOpts) if err != nil { + if strings.Contains(err.Error(), "is too large") { + return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err) + } return err } if ok && len(resp.Errors) == 0 { @@ -432,7 +435,13 @@ func (c *ConsulBackend) Put(ctx context.Context, entry *physical.Entry) error { writeOpts = writeOpts.WithContext(ctx) _, err := c.kv.Put(pair, writeOpts) - return err + if err != nil { + if strings.Contains(err.Error(), "Value exceeds") { + return errwrap.Wrapf(fmt.Sprintf("%s: {{err}}", physical.ErrValueTooLarge), err) + } + return err + } + return nil } // Get is used to fetch an entry diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index 625c973b1561d..f772c186cc4b1 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -1,10 +1,12 @@ package consul import ( + "context" "fmt" "math/rand" "os" "reflect" + "strings" "sync" "testing" "time" @@ -532,6 +534,80 @@ func TestConsulBackend(t *testing.T) { physical.ExerciseBackend_ListPrefix(t, b) } +func TestConsul_TooLarge(t *testing.T) { + var token string + addr := os.Getenv("CONSUL_HTTP_ADDR") + if addr == "" { + cid, connURL := prepareTestContainer(t) + if cid != "" { + defer cleanupTestContainer(t, cid) + } + addr = connURL + token = dockertest.ConsulACLMasterToken + } + + conf := api.DefaultConfig() + conf.Address = addr + conf.Token = token + client, err := api.NewClient(conf) + if err 
!= nil { + t.Fatalf("err: %v", err) + } + + randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) + defer func() { + client.KV().DeleteTree(randPath, nil) + }() + + logger := logging.NewVaultLogger(log.Debug) + + b, err := NewConsulBackend(map[string]string{ + "address": conf.Address, + "path": randPath, + "max_parallel": "256", + "token": conf.Token, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + zeros := make([]byte, 600000, 600000) + n, err := rand.Read(zeros) + if n != 600000 { + t.Fatalf("expected 500k zeros, read %d", n) + } + if err != nil { + t.Fatal(err) + } + + err = b.Put(context.Background(), &physical.Entry{ + Key: "foo", + Value: zeros, + }) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected value too large error, got %v", err) + } + + err = b.(physical.Transactional).Transaction(context.Background(), []*physical.TxnEntry{ + { + Operation: physical.PutOperation, + Entry: &physical.Entry{ + Key: "foo", + Value: zeros, + }, + }, + }) + if err == nil { + t.Fatal("expected error") + } + if !strings.Contains(err.Error(), physical.ErrValueTooLarge) { + t.Fatalf("expected value too large error, got %v", err) + } +} + func TestConsulHABackend(t *testing.T) { var token string addr := os.Getenv("CONSUL_HTTP_ADDR") diff --git a/physical/physical.go b/physical/physical.go index 0f4b0002513f9..cb621282fba75 100644 --- a/physical/physical.go +++ b/physical/physical.go @@ -20,6 +20,10 @@ const ( PutOperation = "put" ) +const ( + ErrValueTooLarge = "put failed due to value being too large" +) + // ShutdownSignal type ShutdownChannel chan struct{} From a5d4326e0b66c289a199fc9258f9d2caaa94cea3 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Thu, 7 Feb 2019 18:12:51 -0500 Subject: [PATCH 27/38] Interim, to move machines --- helper/forwarding/types.pb.go | 8 +-- helper/storagepacker/storagepacker_v2.go | 44 +++++++-------- helper/storagepacker/types.pb.go | 70 +++++++++--------------- helper/storagepacker/types.proto | 4 -- 4 files changed, 48 insertions(+), 78 deletions(-) diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index e7b104c6a7db3..09813b6423a55 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -23,7 +23,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Request struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -115,12 +115,12 @@ type URL struct { Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` // This isn't needed now but might be in the future, so we'll skip the // number to keep the ordering in net/url - // UserInfo user = 3; + //UserInfo user = 3; Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` // This also isn't needed right now, but we'll reserve the 
number - // bool force_query = 7; + //bool force_query = 7; RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -244,7 +244,7 @@ func (m *HeaderEntry) GetValues() []string { type Response struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` // Added in 0.6.2 to ensure that the content-type is set appropriately, as diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 344aeb18e9934..4935146552be1 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -19,18 +19,12 @@ import ( "github.com/hashicorp/vault/helper/cryptoutil" "github.com/hashicorp/vault/helper/locksutil" "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/physical" ) const ( defaultBaseBucketBits = 8 defaultBucketShardBits = 4 - // Larger size of the bucket size adversely affects the performance of the - // storage packer. Also, some of the backends impose a maximum size limit - // on the objects that gets persisted. For example, Consul imposes 256KB if using transactions - // and DynamoDB imposes 400KB. Going forward, if there exists storage - // backends that has more constrained limits, this will have to become more - // flexible. For now, 240KB seems like a decent value. - defaultBucketMaxSize = 240 * 1024 ) type Config struct { @@ -49,11 +43,6 @@ type Config struct { // BucketShardBits is the number of bits to use for sub-buckets a bucket // gets sharded into when it reaches the maximum threshold. BucketShardBits int `json:"-"` - - // BucketMaxSize (in bytes) is the maximum allowed size per bucket. When - // the size of the bucket reaches a threshold relative to this limit, it - // gets sharded into the configured number of pieces incrementally. - BucketMaxSize int64 `json:"-"` } // StoragePacker packs many items into abstractions called buckets. The goal @@ -258,7 +247,12 @@ func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e defer bucket.Unlock() if err := s.storeBucket(ctx, bucket); err != nil { - return err + if strings.Contains(err.Error(), physical.ErrValueTooLarge) { + err = s.shardBucket(ctx, bucket) + } + if err != nil { + return err + } } s.bucketsCacheLock.Lock() @@ -268,6 +262,17 @@ func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e return nil } +func (s *StoragePacker) shardBucket(ctx context.Context, bucket *LockedBucket) error { + for i := 0; i < 2^s.BucketShardBits; i++ { + shardedBucket := &LockedBucket{Bucket: &Bucket{}} + bucket.Buckets[fmt.Sprintf("%x", i)] = shardedBucket + } + cacheKey := hexVal[0 : s.BaseBucketBits/4] + lock := locksutil.LockForKey(s.storageLocks, cacheKey) + lock.RLock() + +} + // storeBucket actually stores the bucket. It expects that it's already locked. 
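// Note that when queue mode has been turned on via SetQueueMode, storeBucket
// does not write to storage at all: it stages the bucket in queuedBuckets and
// returns, and the staged buckets are only written out later when FlushQueue
// walks the queue. Otherwise the bucket is marshaled and written through
// BucketStorageView under the lock the caller already holds.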
func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket) error { if atomic.LoadUint32(&s.queueMode) == 1 { @@ -560,14 +565,7 @@ func NewStoragePackerV2(ctx context.Context, config *Config) (StoragePacker, err // constant: the bucket base count, so we know how many to expect at // the base level // - // The rest of the values can change; the max size can change based on - // e.g. if storage is migrated, so as long as we don't move to a new - // location with a smaller value we're fine (and even then we're fine - // if we can read it; otherwise storage migration would have failed - // anyways). The shard count is recorded in each bucket at the time - // it's sharded; if we realize it's more efficient to do some other - // value later we can update it and use that going forward for new - // shards. + // The rest of the values can change config.BaseBucketBits = exist.BaseBucketBits } @@ -575,10 +573,6 @@ func NewStoragePackerV2(ctx context.Context, config *Config) (StoragePacker, err config.BucketShardBits = defaultBucketShardBits } - if config.BucketMaxSize == 0 { - config.BucketMaxSize = defaultBucketMaxSize - } - if config.BaseBucketBits%4 != 0 { return nil, fmt.Errorf("bucket base bits of %d is not a multiple of four", config.BaseBucketBits) } diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index ef3974e1f3fc2..725f40feeed0f 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -85,14 +85,10 @@ type Bucket struct { // ItemMap stores a mapping of item ID to message. Used by v2. ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Buckets are the buckets contained within this bucket - Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Sharded indicates if the contained buckets are pushed out or not - Sharded bool `sentinel:"" protobuf:"varint,5,opt,name=sharded,proto3" json:"sharded,omitempty"` - // Size of this bucket in number of bytes - Size uint32 `sentinel:"" protobuf:"varint,6,opt,name=size,proto3" json:"size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } @@ -148,20 +144,6 @@ func (m *Bucket) GetBuckets() map[string]*Bucket { return nil } -func (m *Bucket) GetSharded() bool { - if m != nil { - return m.Sharded - } - return false -} - -func (m *Bucket) GetSize() uint32 { - if m != nil { - return m.Size - } - return 0 -} - func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") @@ -172,27 +154,25 @@ func init() { func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } var fileDescriptor_c0e98c66c4f51b7f = []byte{ - // 341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x4b, 
0xeb, 0x40, - 0x10, 0xc6, 0x49, 0xd2, 0x36, 0x7d, 0xd3, 0xf6, 0xf1, 0xd8, 0xa7, 0xb0, 0xf6, 0x14, 0x7a, 0x8a, - 0x0a, 0xbb, 0x58, 0x2f, 0x22, 0x7a, 0xb0, 0xa0, 0xe0, 0x41, 0xd0, 0x1c, 0xbd, 0xc8, 0x26, 0x19, - 0x93, 0xd0, 0xa4, 0x09, 0xbb, 0x9b, 0x42, 0xfc, 0x3f, 0xfc, 0x7f, 0xa5, 0xd9, 0x16, 0x5a, 0x09, - 0x3d, 0xed, 0x0c, 0xf3, 0x7d, 0xbf, 0xfd, 0x66, 0x59, 0xf0, 0x52, 0xcc, 0x2b, 0x94, 0x5c, 0xe9, - 0x52, 0x8a, 0x04, 0x2b, 0x11, 0x2d, 0x51, 0x72, 0xdd, 0x54, 0xa8, 0x58, 0x25, 0x4b, 0x5d, 0x92, - 0xc9, 0xc1, 0x68, 0x7a, 0x96, 0x94, 0x65, 0x92, 0x23, 0x6f, 0x87, 0x61, 0xfd, 0xc9, 0xc5, 0xaa, - 0x31, 0xca, 0xd9, 0x13, 0xf4, 0x9e, 0x35, 0x16, 0xe4, 0x2f, 0xd8, 0x59, 0x4c, 0x2d, 0xcf, 0xf2, - 0xff, 0x04, 0x76, 0x16, 0x13, 0x06, 0x6e, 0x81, 0x4a, 0x89, 0x04, 0xa9, 0xed, 0x59, 0xfe, 0x68, - 0x7e, 0xc2, 0x0c, 0x84, 0xed, 0x20, 0xec, 0x61, 0xd5, 0x04, 0x3b, 0xd1, 0xec, 0xdb, 0x81, 0xc1, - 0xa2, 0x8e, 0x96, 0xa8, 0xc9, 0x3f, 0x70, 0x96, 0xd8, 0x6c, 0x59, 0x9b, 0x92, 0x9c, 0x43, 0x3f, - 0xd3, 0x58, 0x28, 0x6a, 0x7b, 0x8e, 0x3f, 0x9a, 0xff, 0x67, 0x07, 0xf1, 0xd8, 0x26, 0x40, 0x60, - 0x14, 0xe4, 0x1e, 0x86, 0x9b, 0xe2, 0xa3, 0x10, 0x15, 0x75, 0x5a, 0xf5, 0xec, 0x97, 0xda, 0xdc, - 0xd2, 0x9a, 0x5e, 0x44, 0xf5, 0xb8, 0xd2, 0xb2, 0x09, 0xdc, 0xcc, 0x74, 0xe4, 0x0e, 0xdc, 0xb0, - 0x9d, 0x2b, 0xda, 0x3b, 0xe6, 0x36, 0x87, 0xda, 0xba, 0xb7, 0x16, 0x42, 0xc1, 0x55, 0xa9, 0x90, - 0x31, 0xc6, 0xb4, 0xef, 0x59, 0xfe, 0x30, 0xd8, 0xb5, 0x84, 0x40, 0x4f, 0x65, 0x5f, 0x48, 0x07, - 0x9e, 0xe5, 0x4f, 0x82, 0xb6, 0x9e, 0xbe, 0xc2, 0x78, 0x3f, 0x44, 0xc7, 0xde, 0x17, 0xd0, 0x5f, - 0x8b, 0xbc, 0x3e, 0xfe, 0x84, 0x46, 0x72, 0x6b, 0xdf, 0x58, 0xd3, 0x37, 0x18, 0xef, 0x07, 0xeb, - 0x20, 0x5e, 0x1e, 0x12, 0x4f, 0x3b, 0xb7, 0xdb, 0x43, 0x2e, 0xae, 0xde, 0x79, 0x92, 0xe9, 0xb4, - 0x0e, 0x59, 0x54, 0x16, 0x3c, 0x15, 0x2a, 0xcd, 0xa2, 0x52, 0x56, 0x7c, 0x2d, 0xea, 0x5c, 0xf3, - 0xae, 0x8f, 0x14, 0x0e, 0xda, 0x78, 0xd7, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x29, 0x1c, - 0x91, 0x67, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4b, 0xfb, 0x30, + 0x18, 0xc6, 0x69, 0xf7, 0xeb, 0xfb, 0x7d, 0x37, 0x45, 0xa2, 0x42, 0xdd, 0xa9, 0xec, 0x34, 0x15, + 0x12, 0x9c, 0x17, 0x11, 0x3d, 0x38, 0x50, 0xf0, 0x20, 0x68, 0x8f, 0x5e, 0x24, 0xed, 0x5e, 0xdb, + 0xb0, 0x76, 0x09, 0x49, 0x3a, 0xe8, 0x5f, 0xec, 0xbf, 0x21, 0x6b, 0x36, 0x58, 0xa5, 0xec, 0xd4, + 0xb7, 0x3c, 0xcf, 0xf3, 0x79, 0x9f, 0x84, 0x40, 0x98, 0x61, 0xae, 0x50, 0x33, 0x63, 0xa5, 0xe6, + 0x29, 0x2a, 0x9e, 0x2c, 0x51, 0x33, 0x5b, 0x29, 0x34, 0x54, 0x69, 0x69, 0x25, 0x39, 0x6a, 0x48, + 0xe3, 0x8b, 0x54, 0xca, 0x34, 0x47, 0x56, 0x8b, 0x71, 0xf9, 0xcd, 0xf8, 0xaa, 0x72, 0xce, 0xc9, + 0x0b, 0x74, 0x5f, 0x2d, 0x16, 0xe4, 0x18, 0x7c, 0xb1, 0x08, 0xbc, 0xd0, 0x9b, 0xfe, 0x8f, 0x7c, + 0xb1, 0x20, 0x14, 0x06, 0x05, 0x1a, 0xc3, 0x53, 0x0c, 0xfc, 0xd0, 0x9b, 0x0e, 0x67, 0x67, 0xd4, + 0x41, 0xe8, 0x0e, 0x42, 0x9f, 0x56, 0x55, 0xb4, 0x33, 0x4d, 0x7e, 0x7c, 0xe8, 0xcf, 0xcb, 0x64, + 0x89, 0x96, 0x9c, 0x40, 0x67, 0x89, 0xd5, 0x96, 0xb5, 0x19, 0xc9, 0x25, 0xf4, 0x84, 0xc5, 0xc2, + 0x04, 0x7e, 0xd8, 0x99, 0x0e, 0x67, 0xa7, 0xb4, 0x51, 0x8f, 0x6e, 0x0a, 0x44, 0xce, 0x41, 0x1e, + 0xe1, 0xdf, 0x66, 0xf8, 0x2a, 0xb8, 0x0a, 0x3a, 0xb5, 0x7b, 0xf2, 0xc7, 0xed, 0xb6, 0xd4, 0xa1, + 0x37, 0xae, 0x9e, 0x57, 0x56, 0x57, 0xd1, 0x40, 0xb8, 0x3f, 0xf2, 0x00, 0x83, 0xb8, 0xd6, 0x4d, + 0xd0, 0x3d, 0x94, 0x76, 0x1f, 0xb3, 0x4d, 0x6f, 0x23, 0xe3, 0x77, 0x18, 0xed, 0x63, 0x5b, 0x4e, + 0x72, 0x05, 0xbd, 0x35, 0xcf, 
0xcb, 0xc3, 0x97, 0xe2, 0x2c, 0xf7, 0xfe, 0x9d, 0x37, 0xfe, 0x80, + 0xd1, 0xfe, 0xaa, 0x16, 0xe2, 0x75, 0x93, 0x78, 0xde, 0xda, 0x77, 0x0f, 0x39, 0xbf, 0xf9, 0x64, + 0xa9, 0xb0, 0x59, 0x19, 0xd3, 0x44, 0x16, 0x2c, 0xe3, 0x26, 0x13, 0x89, 0xd4, 0x8a, 0xad, 0x79, + 0x99, 0x5b, 0xd6, 0xf6, 0x34, 0xe2, 0x7e, 0x5d, 0xef, 0xf6, 0x37, 0x00, 0x00, 0xff, 0xff, 0x7b, + 0x21, 0xba, 0x2f, 0x39, 0x02, 0x00, 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 48b5afd211ac1..a57a84f7c2932 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -29,8 +29,4 @@ message Bucket { map item_map = 3; // Buckets are the buckets contained within this bucket map buckets = 4; - // Sharded indicates if the contained buckets are pushed out or not - bool sharded = 5; - // Size of this bucket in number of bytes - uint32 size = 6; } From 92d8bbd681c352096cc5578442edd747e4d23c14 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Fri, 8 Feb 2019 14:19:30 -0500 Subject: [PATCH 28/38] Checking in initial sharding impl --- helper/forwarding/types.pb.go | 8 +- helper/storagepacker/storagepacker_v2.go | 195 ++++++++++++++++------- helper/storagepacker/types.pb.go | 58 +++---- helper/storagepacker/types.proto | 2 - 4 files changed, 164 insertions(+), 99 deletions(-) diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index 09813b6423a55..e7b104c6a7db3 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -23,7 +23,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Request struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - //uint64 id = 1; + // uint64 id = 1; Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -115,12 +115,12 @@ type URL struct { Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` // This isn't needed now but might be in the future, so we'll skip the // number to keep the ordering in net/url - //UserInfo user = 3; + // UserInfo user = 3; Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` // This also isn't needed right now, but we'll reserve the number - //bool force_query = 7; + // bool force_query = 7; RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -244,7 +244,7 @@ func (m *HeaderEntry) GetValues() []string { type Response struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - //uint64 id = 1; + // uint64 id = 1; StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` // Added in 0.6.2 to ensure that the content-type is set 
appropriately, as diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 4935146552be1..8fc6d0c6e61ce 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -65,12 +65,17 @@ type StoragePackerV2 struct { queueMode uint32 queuedBuckets sync.Map + + // disableSharding is used for tests + disableSharding bool + + prewarmCache sync.Once } // LockedBucket embeds a bucket and its corresponding lock to ensure thread // safety type LockedBucket struct { - sync.RWMutex + *locksutil.LockEntry *Bucket } @@ -89,26 +94,10 @@ func (s *StoragePackerV2) BucketStorageKeyForItemID(itemID string) string { return bucketRaw.(*LockedBucket).Key } - // If we have existing buckets we'd have parsed them in on startup - // (assuming that all users load all entries on startup), so this is a - // fresh storagepacker, so we use the root bits to return a proper number - // of chars. But first do that, lock, and try again to ensure nothing - // changed without holding a lock. - cacheKey := hexVal[0 : s.BaseBucketBits/4] - lock := locksutil.LockForKey(s.storageLocks, cacheKey) - lock.RLock() - - s.bucketsCacheLock.RLock() - _, bucketRaw, found = s.bucketsCache.LongestPrefix(hexVal) - s.bucketsCacheLock.RUnlock() - - lock.RUnlock() - - if found { - return bucketRaw.(*LockedBucket).Key - } - - return cacheKey + // If we have existing buckets we'd have parsed them in on startup so this + // is a fresh storagepacker, so we use the root bits to return a proper + // number of chars. + return hexVal[0 : s.BaseBucketBits/4] } func (s *StoragePackerV2) BucketKeyHashByItemID(itemID string) string { @@ -120,27 +109,48 @@ func (s *StoragePackerV2) GetCacheKey(key string) string { } func (s *StoragePackerV2) BucketKeys(ctx context.Context) ([]string, error) { - keys := map[string]struct{}{} - diskBuckets, err := logical.CollectKeys(ctx, s.BucketStorageView) - if err != nil { - return nil, err - } - for _, bucket := range diskBuckets { - keys[bucket] = struct{}{} + var retErr error + s.prewarmCache.Do(func() { + diskBuckets, err := logical.CollectKeys(ctx, s.BucketStorageView) + if err != nil { + retErr = err + return + } + for _, key := range diskBuckets { + // Read from the underlying view + storageEntry, err := s.BucketStorageView.Get(ctx, key) + if err != nil { + retErr = errwrap.Wrapf("failed to read packed storage entry: {{err}}", err) + return + } + if storageEntry == nil { + retErr = fmt.Errorf("no data found at bucket %s", key) + return + } + + bucket, err := s.DecodeBucket(storageEntry) + if err != nil { + retErr = err + return + } + + s.bucketsCacheLock.Lock() + s.bucketsCache.Insert(s.GetCacheKey(bucket.Key), bucket) + s.bucketsCacheLock.Unlock() + } + }) + if retErr != nil { + return nil, retErr } + ret := make([]string, 0, 256) s.bucketsCacheLock.RLock() s.bucketsCache.Walk(func(s string, _ interface{}) bool { - keys[s] = struct{}{} + ret = append(ret, s) return false }) s.bucketsCacheLock.RUnlock() - ret := make([]string, 0, len(keys)) - for k := range keys { - ret = append(ret, k) - } - return ret, nil } @@ -219,8 +229,11 @@ func (s *StoragePackerV2) DecodeBucket(storageEntry *logical.StorageEntry) (*Loc return nil, errwrap.Wrapf("failed to decode packed storage entry: {{err}}", err) } + cacheKey := s.GetCacheKey(storageEntry.Key) + lock := locksutil.LockForKey(s.storageLocks, cacheKey) lb := &LockedBucket{ - Bucket: &bucket, + LockEntry: lock, + Bucket: &bucket, } lb.Key = storageEntry.Key @@ -239,16 +252,12 @@ func (s 
*StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e cacheKey := s.GetCacheKey(bucket.Key) - lock := locksutil.LockForKey(s.storageLocks, cacheKey) - lock.Lock() - defer lock.Unlock() - bucket.Lock() defer bucket.Unlock() if err := s.storeBucket(ctx, bucket); err != nil { - if strings.Contains(err.Error(), physical.ErrValueTooLarge) { - err = s.shardBucket(ctx, bucket) + if strings.Contains(err.Error(), physical.ErrValueTooLarge) && !s.disableSharding { + err = s.shardBucket(ctx, bucket, cacheKey) } if err != nil { return err @@ -262,15 +271,91 @@ func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e return nil } -func (s *StoragePacker) shardBucket(ctx context.Context, bucket *LockedBucket) error { +func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, cacheKey string) error { + // Create the shards and lock them + locks := make(map[*locksutil.LockEntry]struct{}, 2^s.BucketShardBits) + shards := make(map[string]*LockedBucket, 2^s.BucketShardBits) for i := 0; i < 2^s.BucketShardBits; i++ { - shardedBucket := &LockedBucket{Bucket: &Bucket{}} - bucket.Buckets[fmt.Sprintf("%x", i)] = shardedBucket + shardKey := fmt.Sprintf("%x", i) + lock := locksutil.LockForKey(s.storageLocks, cacheKey+shardKey) + shardedBucket := &LockedBucket{ + LockEntry: lock, + Bucket: &Bucket{ + Key: fmt.Sprintf("%s/%s", bucket.Key, shardKey), + }, + } + shards[shardKey] = shardedBucket + // If it was equal we'd be locked already + if lock != bucket.LockEntry { + // Don't try to lock the same lock twice in case it hashes that way + if _, ok := locks[lock]; !ok { + lock.Lock() + defer lock.Unlock() + locks[lock] = struct{}{} + } + } + } + + parentPrefix := s.GetCacheKey(bucket.Key) + // Resilver the items + for k, v := range bucket.ItemMap { + itemKey := strings.TrimPrefix(k, parentPrefix)[0 : s.BucketShardBits/4] + // Sanity check + childBucket, ok := shards[itemKey] + if !ok { + // We didn't complete sharding so don't make other parts of the + // code think that it completed + s.Logger.Error("failed to find sharded storagepacker bucket", "bucket_key", bucket.Key, "item_key", itemKey) + return errors.New("failed to shard storagepacker bucket") + } + childBucket.ItemMap[k] = v } - cacheKey := hexVal[0 : s.BaseBucketBits/4] - lock := locksutil.LockForKey(s.storageLocks, cacheKey) - lock.RLock() + // Ensure we can write all of these buckets. Create a cleanup function if not. + retErr := new(multierror.Error) + cleanupStorage := func() { + for _, v := range shards { + if err := s.BucketStorageView.Delete(ctx, v.Key); err != nil { + retErr = multierror.Append(retErr, err) + // Don't exit out, clean up as much as possible + } + } + } + for _, v := range shards { + if err := s.storeBucket(ctx, v); err != nil { + retErr = multierror.Append(retErr, err) + cleanupStorage() + return retErr + } + } + + cleanupCache := func() { + for _, v := range shards { + s.bucketsCache.Delete(s.GetCacheKey(v.Key)) + } + } + // Add to the cache. It's not too late to back out, via the cleanup cache + // function. We hold the lock while storing the updated original bucket so + // that nobody accesses it in an inconsistent state. 
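// If persisting the now-empty parent fails, the error path below restores the
// parent's ItemMap, deletes the freshly written shard buckets from storage,
// and evicts them from the cache, so the intent is that a reader sees either
// the old unsharded bucket or the fully sharded layout, never a partial
// split. The storage cleanup helper accumulates any deletion failures into
// retErr rather than aborting early.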
+ s.bucketsCacheLock.Lock() + { + for _, v := range shards { + s.bucketsCache.Insert(s.GetCacheKey(v.Key), v) + } + + // Finally, update the original and persist + origBucketItemMap := bucket.ItemMap + bucket.ItemMap = nil + if err := s.storeBucket(ctx, bucket); err != nil { + retErr = multierror.Append(retErr, err) + bucket.ItemMap = origBucketItemMap + cleanupStorage() + cleanupCache() + } + } + s.bucketsCacheLock.Unlock() + + return retErr.ErrorOrNil() } // storeBucket actually stores the bucket. It expects that it's already locked. @@ -393,9 +478,6 @@ func (s *StoragePackerV2) DeleteItem(ctx context.Context, itemID string) error { s.bucketsCacheLock.Unlock() } - bucket.Lock() - defer bucket.Unlock() - if len(bucket.ItemMap) == 0 { return nil } @@ -451,20 +533,15 @@ func (s *StoragePackerV2) GetItem(ctx context.Context, itemID string) (*Item, er s.bucketsCacheLock.Unlock() } - bucket.RLock() - if len(bucket.ItemMap) == 0 { - bucket.RUnlock() return nil, nil } item, ok := bucket.ItemMap[itemID] if !ok { - bucket.RUnlock() return nil, nil } - bucket.RUnlock() return &Item{ ID: itemID, Message: item, @@ -522,9 +599,6 @@ func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { s.bucketsCacheLock.Unlock() } - bucket.Lock() - defer bucket.Unlock() - if err := bucket.upsert(item); err != nil { return errwrap.Wrapf("failed to update entry in packed storage entry: {{err}}", err) } @@ -605,6 +679,11 @@ func NewStoragePackerV2(ctx context.Context, config *Config) (StoragePacker, err storageLocks: locksutil.CreateLocks(), } + // Prewarm the cache + if _, err := packer.BucketKeys(ctx); err != nil { + return nil, errwrap.Wrapf("error preloading storagepacker cache: {{err}}", err) + } + return packer, nil } diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 725f40feeed0f..78c1fce4dab9f 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -83,12 +83,10 @@ type Bucket struct { // Items holds the items contained within this bucket. Used by v1. Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` // ItemMap stores a mapping of item ID to message. Used by v2. 
- ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Buckets are the buckets contained within this bucket - Buckets map[string]*Bucket `sentinel:"" protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } @@ -137,42 +135,32 @@ func (m *Bucket) GetItemMap() map[string]*any.Any { return nil } -func (m *Bucket) GetBuckets() map[string]*Bucket { - if m != nil { - return m.Buckets - } - return nil -} - func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") - proto.RegisterMapType((map[string]*Bucket)(nil), "storagepacker.Bucket.BucketsEntry") proto.RegisterMapType((map[string]*any.Any)(nil), "storagepacker.Bucket.ItemMapEntry") } func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } var fileDescriptor_c0e98c66c4f51b7f = []byte{ - // 311 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4b, 0xfb, 0x30, - 0x18, 0xc6, 0x69, 0xf7, 0xeb, 0xfb, 0x7d, 0x37, 0x45, 0xa2, 0x42, 0xdd, 0xa9, 0xec, 0x34, 0x15, - 0x12, 0x9c, 0x17, 0x11, 0x3d, 0x38, 0x50, 0xf0, 0x20, 0x68, 0x8f, 0x5e, 0x24, 0xed, 0x5e, 0xdb, - 0xb0, 0x76, 0x09, 0x49, 0x3a, 0xe8, 0x5f, 0xec, 0xbf, 0x21, 0x6b, 0x36, 0x58, 0xa5, 0xec, 0xd4, - 0xb7, 0x3c, 0xcf, 0xf3, 0x79, 0x9f, 0x84, 0x40, 0x98, 0x61, 0xae, 0x50, 0x33, 0x63, 0xa5, 0xe6, - 0x29, 0x2a, 0x9e, 0x2c, 0x51, 0x33, 0x5b, 0x29, 0x34, 0x54, 0x69, 0x69, 0x25, 0x39, 0x6a, 0x48, - 0xe3, 0x8b, 0x54, 0xca, 0x34, 0x47, 0x56, 0x8b, 0x71, 0xf9, 0xcd, 0xf8, 0xaa, 0x72, 0xce, 0xc9, - 0x0b, 0x74, 0x5f, 0x2d, 0x16, 0xe4, 0x18, 0x7c, 0xb1, 0x08, 0xbc, 0xd0, 0x9b, 0xfe, 0x8f, 0x7c, - 0xb1, 0x20, 0x14, 0x06, 0x05, 0x1a, 0xc3, 0x53, 0x0c, 0xfc, 0xd0, 0x9b, 0x0e, 0x67, 0x67, 0xd4, - 0x41, 0xe8, 0x0e, 0x42, 0x9f, 0x56, 0x55, 0xb4, 0x33, 0x4d, 0x7e, 0x7c, 0xe8, 0xcf, 0xcb, 0x64, - 0x89, 0x96, 0x9c, 0x40, 0x67, 0x89, 0xd5, 0x96, 0xb5, 0x19, 0xc9, 0x25, 0xf4, 0x84, 0xc5, 0xc2, - 0x04, 0x7e, 0xd8, 0x99, 0x0e, 0x67, 0xa7, 0xb4, 0x51, 0x8f, 0x6e, 0x0a, 0x44, 0xce, 0x41, 0x1e, - 0xe1, 0xdf, 0x66, 0xf8, 0x2a, 0xb8, 0x0a, 0x3a, 0xb5, 0x7b, 0xf2, 0xc7, 0xed, 0xb6, 0xd4, 0xa1, - 0x37, 0xae, 0x9e, 0x57, 0x56, 0x57, 0xd1, 0x40, 0xb8, 0x3f, 0xf2, 0x00, 0x83, 0xb8, 0xd6, 0x4d, - 0xd0, 0x3d, 0x94, 0x76, 0x1f, 0xb3, 0x4d, 0x6f, 0x23, 0xe3, 0x77, 0x18, 0xed, 0x63, 0x5b, 0x4e, - 0x72, 0x05, 0xbd, 0x35, 0xcf, 0xcb, 0xc3, 0x97, 0xe2, 0x2c, 0xf7, 0xfe, 0x9d, 0x37, 0xfe, 0x80, - 0xd1, 0xfe, 0xaa, 0x16, 0xe2, 0x75, 0x93, 0x78, 0xde, 0xda, 0x77, 0x0f, 0x39, 0xbf, 0xf9, 0x64, - 0xa9, 0xb0, 0x59, 0x19, 0xd3, 0x44, 0x16, 0x2c, 0xe3, 0x26, 0x13, 0x89, 0xd4, 0x8a, 0xad, 0x79, - 0x99, 0x5b, 0xd6, 0xf6, 0x34, 0xe2, 0x7e, 0x5d, 0xef, 0xf6, 0x37, 0x00, 0x00, 0xff, 0xff, 0x7b, - 0x21, 0xba, 0x2f, 0x39, 0x02, 
0x00, 0x00, + // 276 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcf, 0x4b, 0xc3, 0x30, + 0x14, 0xc7, 0x69, 0xeb, 0x36, 0x7d, 0x53, 0x91, 0xe8, 0xa1, 0xee, 0x54, 0x7a, 0xaa, 0x1e, 0x12, + 0x9c, 0x17, 0x11, 0x3c, 0x38, 0x50, 0xf0, 0x20, 0x48, 0x8f, 0x5e, 0x24, 0xed, 0x9e, 0x6d, 0xe8, + 0x8f, 0x84, 0x24, 0x1d, 0xf4, 0x1f, 0xf5, 0xef, 0x91, 0x36, 0x0e, 0x9c, 0x0c, 0x6f, 0x2f, 0x7c, + 0x3f, 0xf9, 0xe4, 0x1b, 0x1e, 0x44, 0x25, 0xd6, 0x0a, 0x35, 0x33, 0x56, 0x6a, 0x5e, 0xa0, 0xe2, + 0x79, 0x85, 0x9a, 0xd9, 0x5e, 0xa1, 0xa1, 0x4a, 0x4b, 0x2b, 0xc9, 0xc9, 0x4e, 0xb4, 0xb8, 0x2c, + 0xa4, 0x2c, 0x6a, 0x64, 0x63, 0x98, 0x75, 0x9f, 0x8c, 0xb7, 0xbd, 0x23, 0xe3, 0x67, 0x38, 0x78, + 0xb1, 0xd8, 0x90, 0x53, 0xf0, 0xc5, 0x3a, 0xf4, 0x22, 0x2f, 0x39, 0x4a, 0x7d, 0xb1, 0x26, 0x14, + 0x66, 0x0d, 0x1a, 0xc3, 0x0b, 0x0c, 0xfd, 0xc8, 0x4b, 0xe6, 0xcb, 0x0b, 0xea, 0x24, 0x74, 0x2b, + 0xa1, 0x8f, 0x6d, 0x9f, 0x6e, 0xa1, 0xf8, 0xcb, 0x83, 0xe9, 0xaa, 0xcb, 0x2b, 0xb4, 0xe4, 0x0c, + 0x82, 0x0a, 0xfb, 0x1f, 0xd7, 0x30, 0x92, 0x2b, 0x98, 0x08, 0x8b, 0x8d, 0x09, 0xfd, 0x28, 0x48, + 0xe6, 0xcb, 0x73, 0xba, 0x53, 0x8f, 0x0e, 0x05, 0x52, 0x47, 0x90, 0x07, 0x38, 0x1c, 0x86, 0x8f, + 0x86, 0xab, 0x30, 0x18, 0xe9, 0xf8, 0x0f, 0xed, 0x5e, 0x19, 0x2f, 0xbd, 0x72, 0xf5, 0xd4, 0x5a, + 0xdd, 0xa7, 0x33, 0xe1, 0x4e, 0x8b, 0x37, 0x38, 0xfe, 0x1d, 0xec, 0xe9, 0x72, 0x0d, 0x93, 0x0d, + 0xaf, 0xbb, 0xff, 0xbf, 0xe5, 0x90, 0x7b, 0xff, 0xce, 0x5b, 0xdd, 0xbc, 0xb3, 0x42, 0xd8, 0xb2, + 0xcb, 0x68, 0x2e, 0x1b, 0x56, 0x72, 0x53, 0x8a, 0x5c, 0x6a, 0xc5, 0x36, 0xbc, 0xab, 0x2d, 0xdb, + 0xb7, 0x89, 0x6c, 0x3a, 0xba, 0x6e, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x46, 0x9d, 0x8a, 0xcb, + 0xa8, 0x01, 0x00, 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index a57a84f7c2932..15387fdab1829 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -27,6 +27,4 @@ message Bucket { repeated Item items = 2; // ItemMap stores a mapping of item ID to message. Used by v2. 
map item_map = 3; - // Buckets are the buckets contained within this bucket - map buckets = 4; } From 27e0c603eb451547ae31f29abe3af0a0cda41550 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 13 Feb 2019 07:39:33 -0500 Subject: [PATCH 29/38] Fix compile --- vault/identity_store_entities.go | 4 ++-- vault/identity_store_util.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index 09a9ccd194b58..6b1de1d5341b1 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -709,7 +709,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit if persist && !isPerfSecondaryOrStandby { // Delete the entity which we are merging from in storage - err = i.entityPacker.DeleteItem(fromEntity.ID) + err = i.entityPacker.DeleteItem(ctx, fromEntity.ID) if err != nil { return nil, err } @@ -733,7 +733,7 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit Message: toEntityAsAny, } - err = i.entityPacker.PutItem(item) + err = i.entityPacker.PutItem(ctx, item) if err != nil { return nil, err } diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index e3da83689cd67..1ed5742d80cf3 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -549,7 +549,7 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e if err != nil { return err } - err = i.entityPacker.PutItem(&storagepacker.Item{ + err = i.entityPacker.PutItem(ctx, &storagepacker.Item{ ID: previousEntity.ID, Message: marshaledPreviousEntity, }) From c72e6ae6581be05a643b627b94b902521858bbc1 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 13 Feb 2019 19:34:08 -0500 Subject: [PATCH 30/38] Sharding works. 
Lots of debug still in --- helper/storagepacker/storagepacker_v2.go | 132 +++++++++++----- helper/storagepacker/types.pb.go | 8 +- helper/storagepacker/types.proto | 8 +- physical/consul/consul_test.go | 73 +-------- physical/consul/testing.go | 73 +++++++++ .../storagepacker_sharding_test.go | 149 ++++++++++++++++++ 6 files changed, 337 insertions(+), 106 deletions(-) create mode 100644 physical/consul/testing.go create mode 100644 vault/external_tests/storagepacker/storagepacker_sharding_test.go diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 8fc6d0c6e61ce..708eb04e00fbb 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "errors" "fmt" + "math" "strings" "sync" "sync/atomic" @@ -27,6 +28,10 @@ const ( defaultBucketShardBits = 4 ) +var ( + shardLocks = make(map[*locksutil.LockEntry]struct{}, 32) +) + type Config struct { // BucketStorageView is the storage to be used by all the buckets BucketStorageView *logical.StorageView `json:"-"` @@ -69,6 +74,10 @@ type StoragePackerV2 struct { // disableSharding is used for tests disableSharding bool + // Ensures we only process one sharding at a time, so that we can't grab + // the same locks for the child buckets + shardLock sync.RWMutex + prewarmCache sync.Once } @@ -84,7 +93,7 @@ func (s *StoragePackerV2) BucketsView() *logical.StorageView { } func (s *StoragePackerV2) BucketStorageKeyForItemID(itemID string) string { - hexVal := hex.EncodeToString(cryptoutil.Blake2b256Hash(itemID)) + hexVal := GetItemIDHash(itemID) s.bucketsCacheLock.RLock() _, bucketRaw, found := s.bucketsCache.LongestPrefix(hexVal) @@ -108,6 +117,10 @@ func (s *StoragePackerV2) GetCacheKey(key string) string { return strings.Replace(key, "/", "", -1) } +func GetItemIDHash(itemID string) string { + return hex.EncodeToString(cryptoutil.Blake2b256Hash(itemID)) +} + func (s *StoragePackerV2) BucketKeys(ctx context.Context) ([]string, error) { var retErr error s.prewarmCache.Do(func() { @@ -250,18 +263,11 @@ func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e return fmt.Errorf("missing key") } - cacheKey := s.GetCacheKey(bucket.Key) - bucket.Lock() defer bucket.Unlock() - if err := s.storeBucket(ctx, bucket); err != nil { - if strings.Contains(err.Error(), physical.ErrValueTooLarge) && !s.disableSharding { - err = s.shardBucket(ctx, bucket, cacheKey) - } - if err != nil { - return err - } + if err := s.storeBucket(ctx, bucket, true); err != nil { + return errwrap.Wrapf("failed at high level bucket put: {{err}}", err) } s.bucketsCacheLock.Lock() @@ -271,35 +277,61 @@ func (s *StoragePackerV2) PutBucket(ctx context.Context, bucket *LockedBucket) e return nil } -func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, cacheKey string) error { +func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, cacheKey string, allowLocking bool) error { + if allowLocking { + s.shardLock.Lock() + defer s.shardLock.Unlock() + + bucketLock := bucket.LockEntry + defer func() { + for lock := range shardLocks { + // Let the initial calling function take care of the highest level lock + if lock != bucketLock { + lock.Unlock() + } + // Empty the map + shardLocks = make(map[*locksutil.LockEntry]struct{}, 32) + } + }() + } + + numShards := int(math.Pow(2.0, float64(s.BucketShardBits))) + // Create the shards and lock them - locks := make(map[*locksutil.LockEntry]struct{}, 
2^s.BucketShardBits) - shards := make(map[string]*LockedBucket, 2^s.BucketShardBits) - for i := 0; i < 2^s.BucketShardBits; i++ { + s.Logger.Info("sharding bucket", "bucket_key", bucket.Key, "num_shards", numShards) + defer s.Logger.Info("sharding bucket process exited", "bucket_key", bucket.Key) + + shardLocks[bucket.LockEntry] = struct{}{} + + shards := make(map[string]*LockedBucket, numShards) + for i := 0; i < numShards; i++ { shardKey := fmt.Sprintf("%x", i) lock := locksutil.LockForKey(s.storageLocks, cacheKey+shardKey) shardedBucket := &LockedBucket{ LockEntry: lock, Bucket: &Bucket{ - Key: fmt.Sprintf("%s/%s", bucket.Key, shardKey), + Key: fmt.Sprintf("%s/%s", bucket.Key, shardKey), + ItemMap: make(map[string]*any.Any), }, } shards[shardKey] = shardedBucket // If it was equal we'd be locked already - if lock != bucket.LockEntry { - // Don't try to lock the same lock twice in case it hashes that way - if _, ok := locks[lock]; !ok { - lock.Lock() - defer lock.Unlock() - locks[lock] = struct{}{} - } + s.Logger.Debug("created shard", "shard_key", shardKey) + // Don't try to lock the same lock twice in case it hashes that way + if _, ok := shardLocks[lock]; !ok { + s.Logger.Debug("locking lock", "shard_key", shardKey) + lock.Lock() + shardLocks[lock] = struct{}{} } } + s.Logger.Debug("resilvering items") + parentPrefix := s.GetCacheKey(bucket.Key) // Resilver the items for k, v := range bucket.ItemMap { itemKey := strings.TrimPrefix(k, parentPrefix)[0 : s.BucketShardBits/4] + s.Logger.Trace("resilvering item", "parent_prefix", parentPrefix, "item_id", k, "item_key", itemKey) // Sanity check childBucket, ok := shards[itemKey] if !ok { @@ -311,6 +343,8 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, childBucket.ItemMap[k] = v } + s.Logger.Debug("storing sharded buckets") + // Ensure we can write all of these buckets. Create a cleanup function if not. retErr := new(multierror.Error) cleanupStorage := func() { @@ -321,8 +355,10 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, } } } - for _, v := range shards { - if err := s.storeBucket(ctx, v); err != nil { + for k, v := range shards { + s.Logger.Debug("storing bucket", "shard", k) + if err := s.storeBucket(ctx, v, false); err != nil { + s.Logger.Debug("encountered error", "shard", k) retErr = multierror.Append(retErr, err) cleanupStorage() return retErr @@ -337,8 +373,10 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, // Add to the cache. It's not too late to back out, via the cleanup cache // function. We hold the lock while storing the updated original bucket so // that nobody accesses it in an inconsistent state. + s.Logger.Debug("updating cache") s.bucketsCacheLock.Lock() { + s.Logger.Debug("in locked section") for _, v := range shards { s.bucketsCache.Insert(s.GetCacheKey(v.Key), v) } @@ -346,7 +384,7 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, // Finally, update the original and persist origBucketItemMap := bucket.ItemMap bucket.ItemMap = nil - if err := s.storeBucket(ctx, bucket); err != nil { + if err := s.storeBucket(ctx, bucket, false); err != nil { retErr = multierror.Append(retErr, err) bucket.ItemMap = origBucketItemMap cleanupStorage() @@ -359,12 +397,14 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, } // storeBucket actually stores the bucket. It expects that it's already locked. 
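The switch from 2^s.BucketShardBits to math.Pow above fixes a genuine bug: in Go, ^ is bitwise XOR, not exponentiation, so the old expression evaluated to 6 rather than 16 for the default of 4 shard bits. Below is a standalone sketch of the shard-count and shard-key arithmetic as this hunk implements it; the constant, the parent prefix, and the sample hash value are illustrative assumptions, not values from the tree.

package main

import (
	"fmt"
	"math"
	"strings"
)

func main() {
	const bucketShardBits = 4 // assumption: matches the default shard-bit setting in this series

	// Go's ^ operator is XOR, so 2^4 evaluates to 6, not 16.
	fmt.Println("2 ^ 4 as originally written (XOR):", 2^bucketShardBits)

	// The fixed computation: 2 raised to the number of shard bits.
	numShards := int(math.Pow(2.0, float64(bucketShardBits)))
	fmt.Println("numShards via math.Pow:", numShards) // 16

	// An integer-only equivalent would be a left shift, avoiding the float round trip.
	fmt.Println("numShards via shift:   ", 1<<bucketShardBits) // 16

	// Resilvering: with 4 shard bits each shard key is one hex digit, and an item
	// lands in the shard named by the next hex digit of its hashed ID after the
	// parent bucket's cache-key prefix.
	parentPrefix := "v2buckets00"               // illustrative parent cache key
	hashedItemKey := parentPrefix + "3a94c17e"  // illustrative hex digest suffix
	shardKey := strings.TrimPrefix(hashedItemKey, parentPrefix)[0 : bucketShardBits/4]
	fmt.Println("item resilvers into shard:", shardKey) // "3"
}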
-func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket) error { +func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket, allowLocking bool) error { if atomic.LoadUint32(&s.queueMode) == 1 { s.queuedBuckets.Store(bucket.Key, bucket) return nil } + s.Logger.Trace("number of items in bucket", "num_items", len(bucket.ItemMap)) + marshaledBucket, err := proto.Marshal(bucket.Bucket) if err != nil { return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) @@ -377,13 +417,20 @@ func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket) return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) } + s.Logger.Trace("size of compressed bucket", "size", len(compressedBucket)) + // Store the compressed value err = s.BucketStorageView.Put(ctx, &logical.StorageEntry{ Key: bucket.Key, Value: compressedBucket, }) if err != nil { - return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) + if strings.Contains(err.Error(), physical.ErrValueTooLarge) && !s.disableSharding { + err = s.shardBucket(ctx, bucket, s.GetCacheKey(bucket.Key), allowLocking) + } + if err != nil { + return errwrap.Wrapf("failed to persist packed storage entry: {{err}}", err) + } } return nil @@ -431,7 +478,9 @@ func (s *LockedBucket) upsert(item *Item) error { s.ItemMap = make(map[string]*any.Any) } - s.ItemMap[item.ID] = item.Message + itemHash := GetItemIDHash(item.ID) + + s.ItemMap[itemHash] = item.Message return nil } @@ -470,9 +519,11 @@ func (s *StoragePackerV2) DeleteItem(ctx context.Context, itemID string) error { bucket, err = s.DecodeBucket(storageEntry) if err != nil { - return errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) + return errwrap.Wrapf("error decoding existing storage entry for deletion: {{err}}", err) } + bucket.LockEntry = lock + s.bucketsCacheLock.Lock() s.bucketsCache.Insert(cacheKey, bucket) s.bucketsCacheLock.Unlock() @@ -482,13 +533,15 @@ func (s *StoragePackerV2) DeleteItem(ctx context.Context, itemID string) error { return nil } - _, ok := bucket.ItemMap[itemID] + itemHash := GetItemIDHash(itemID) + + _, ok := bucket.ItemMap[itemHash] if !ok { return nil } - delete(bucket.ItemMap, itemID) - return s.storeBucket(ctx, bucket) + delete(bucket.ItemMap, itemHash) + return s.storeBucket(ctx, bucket, true) } // GetItem fetches the storage entry for a given key from its corresponding @@ -525,9 +578,11 @@ func (s *StoragePackerV2) GetItem(ctx context.Context, itemID string) (*Item, er bucket, err = s.DecodeBucket(storageEntry) if err != nil { - return nil, errwrap.Wrapf("error decoding existing storage entry for upsert: {{err}}", err) + return nil, errwrap.Wrapf("error decoding existing storage entry: {{err}}", err) } + bucket.LockEntry = lock + s.bucketsCacheLock.Lock() s.bucketsCache.Insert(cacheKey, bucket) s.bucketsCacheLock.Unlock() @@ -537,7 +592,9 @@ func (s *StoragePackerV2) GetItem(ctx context.Context, itemID string) (*Item, er return nil, nil } - item, ok := bucket.ItemMap[itemID] + itemHash := GetItemIDHash(itemID) + + item, ok := bucket.ItemMap[itemHash] if !ok { return nil, nil } @@ -583,6 +640,7 @@ func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { if storageEntry == nil { bucket = &LockedBucket{ + LockEntry: lock, Bucket: &Bucket{ Key: bucketKey, }, @@ -592,6 +650,8 @@ func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { if err != nil { return errwrap.Wrapf("error decoding existing storage entry for upsert: 
{{err}}", err) } + + bucket.LockEntry = lock } s.bucketsCacheLock.Lock() @@ -604,7 +664,7 @@ func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { } // Persist the result - return s.storeBucket(ctx, bucket) + return s.storeBucket(ctx, bucket, true) } // NewStoragePackerV2 creates a new storage packer for a given view @@ -698,7 +758,7 @@ func (s *StoragePackerV2) SetQueueMode(enabled bool) { func (s *StoragePackerV2) FlushQueue(ctx context.Context) error { var err *multierror.Error s.queuedBuckets.Range(func(key, value interface{}) bool { - lErr := s.storeBucket(ctx, value.(*LockedBucket)) + lErr := s.storeBucket(ctx, value.(*LockedBucket), true) if lErr != nil { err = multierror.Append(err, lErr) } diff --git a/helper/storagepacker/types.pb.go b/helper/storagepacker/types.pb.go index 78c1fce4dab9f..1858e3fd8839e 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -21,9 +21,13 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package -// Item represents a entry that gets inserted into the storage packer +// Item represents an entry that gets inserted into the storage packer type Item struct { - // ID is the UUID to identify the item + // ID must be provided by the caller; the same value, if used with GetItem, + // can be used to fetch the item. However, when iterating through a bucket, + // this ID will be an internal ID. In other words, outside of the use-case + // described above, the caller *must not* rely on this value to be + // consistent with what they passed in. ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // message is the contents of the item Message *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 15387fdab1829..4edfaf4f85721 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -6,9 +6,13 @@ package storagepacker; import "google/protobuf/any.proto"; -// Item represents a entry that gets inserted into the storage packer +// Item represents an entry that gets inserted into the storage packer message Item { - // ID is the UUID to identify the item + // ID must be provided by the caller; the same value, if used with GetItem, + // can be used to fetch the item. However, when iterating through a bucket, + // this ID will be an internal ID. In other words, outside of the use-case + // described above, the caller *must not* rely on this value to be + // consistent with what they passed in. 
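The reworded ID comment is the user-visible consequence of keying ItemMap by GetItemIDHash rather than by the raw ID. A small sketch of what that internal key looks like, assuming the golang.org/x/crypto/blake2b package that the hashing helper in this series wraps; the sample ID is made up.

package main

import (
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// The ID the caller passes to PutItem / GetItem.
	itemID := "entity-0123" // illustrative value

	// What the v2 packer uses internally as the ItemMap key: the hex-encoded
	// Blake2b-256 digest of the caller-supplied ID.
	sum := blake2b.Sum256([]byte(itemID))
	internalKey := hex.EncodeToString(sum[:])

	fmt.Println("caller ID:   ", itemID)
	fmt.Println("internal key:", internalKey)
	// Iterating a bucket's ItemMap therefore yields internal keys, which is why
	// callers must not expect iterated IDs to match what they originally passed in.
}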
string id = 1; // message is the contents of the item google.protobuf.Any message = 2; diff --git a/physical/consul/consul_test.go b/physical/consul/consul_test.go index f772c186cc4b1..50538707c5a81 100644 --- a/physical/consul/consul_test.go +++ b/physical/consul/consul_test.go @@ -23,8 +23,7 @@ import ( type consulConf map[string]string var ( - addrCount int = 0 - testImagePull sync.Once + addrCount int = 0 ) func testHostIP() string { @@ -497,9 +496,9 @@ func TestConsulBackend(t *testing.T) { var token string addr := os.Getenv("CONSUL_HTTP_ADDR") if addr == "" { - cid, connURL := prepareTestContainer(t) + cid, connURL := PrepareConsulTestContainer(t) if cid != "" { - defer cleanupTestContainer(t, cid) + defer CleanupConsulTestContainer(t, cid) } addr = connURL token = dockertest.ConsulACLMasterToken @@ -538,9 +537,9 @@ func TestConsul_TooLarge(t *testing.T) { var token string addr := os.Getenv("CONSUL_HTTP_ADDR") if addr == "" { - cid, connURL := prepareTestContainer(t) + cid, connURL := PrepareConsulTestContainer(t) if cid != "" { - defer cleanupTestContainer(t, cid) + defer CleanupConsulTestContainer(t, cid) } addr = connURL token = dockertest.ConsulACLMasterToken @@ -612,9 +611,9 @@ func TestConsulHABackend(t *testing.T) { var token string addr := os.Getenv("CONSUL_HTTP_ADDR") if addr == "" { - cid, connURL := prepareTestContainer(t) + cid, connURL := PrepareConsulTestContainer(t) if cid != "" { - defer cleanupTestContainer(t, cid) + defer CleanupConsulTestContainer(t, cid) } addr = connURL token = dockertest.ConsulACLMasterToken @@ -665,61 +664,3 @@ func TestConsulHABackend(t *testing.T) { t.Fatalf("bad addr: %v", host) } } - -func prepareTestContainer(t *testing.T) (cid dockertest.ContainerID, retAddress string) { - if os.Getenv("CONSUL_HTTP_ADDR") != "" { - return "", os.Getenv("CONSUL_HTTP_ADDR") - } - - // Without this the checks for whether the container has started seem to - // never actually pass. There's really no reason to expose the test - // containers, so don't. 
- dockertest.BindDockerToLocalhost = "yep" - - testImagePull.Do(func() { - dockertest.Pull(dockertest.ConsulImageName) - }) - - try := 0 - cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool { - try += 1 - // Build a client and verify that the credentials work - config := api.DefaultConfig() - config.Address = connAddress - config.Token = dockertest.ConsulACLMasterToken - client, err := api.NewClient(config) - if err != nil { - if try > 50 { - panic(err) - } - return false - } - - _, err = client.KV().Put(&api.KVPair{ - Key: "setuptest", - Value: []byte("setuptest"), - }, nil) - if err != nil { - if try > 50 { - panic(err) - } - return false - } - - retAddress = connAddress - return true - }) - - if connErr != nil { - t.Fatalf("could not connect to consul: %v", connErr) - } - - return -} - -func cleanupTestContainer(t *testing.T, cid dockertest.ContainerID) { - err := cid.KillRemove() - if err != nil { - t.Fatal(err) - } -} diff --git a/physical/consul/testing.go b/physical/consul/testing.go new file mode 100644 index 0000000000000..0a41b882e1d7a --- /dev/null +++ b/physical/consul/testing.go @@ -0,0 +1,73 @@ +package consul + +import ( + "os" + "sync" + "testing" + "time" + + "github.com/hashicorp/consul/api" + dockertest "gopkg.in/ory-am/dockertest.v2" +) + +var ( + testImagePull sync.Once +) + +func PrepareConsulTestContainer(t *testing.T) (cid dockertest.ContainerID, retAddress string) { + if os.Getenv("CONSUL_HTTP_ADDR") != "" { + return "", os.Getenv("CONSUL_HTTP_ADDR") + } + + // Without this the checks for whether the container has started seem to + // never actually pass. There's really no reason to expose the test + // containers, so don't. + dockertest.BindDockerToLocalhost = "yep" + + testImagePull.Do(func() { + dockertest.Pull(dockertest.ConsulImageName) + }) + + try := 0 + cid, connErr := dockertest.ConnectToConsul(60, 500*time.Millisecond, func(connAddress string) bool { + try += 1 + // Build a client and verify that the credentials work + config := api.DefaultConfig() + config.Address = connAddress + config.Token = dockertest.ConsulACLMasterToken + client, err := api.NewClient(config) + if err != nil { + if try > 50 { + panic(err) + } + return false + } + + _, err = client.KV().Put(&api.KVPair{ + Key: "setuptest", + Value: []byte("setuptest"), + }, nil) + if err != nil { + if try > 50 { + panic(err) + } + return false + } + + retAddress = connAddress + return true + }) + + if connErr != nil { + t.Fatalf("could not connect to consul: %v", connErr) + } + + return +} + +func CleanupConsulTestContainer(t *testing.T, cid dockertest.ContainerID) { + err := cid.KillRemove() + if err != nil { + t.Fatal(err) + } +} diff --git a/vault/external_tests/storagepacker/storagepacker_sharding_test.go b/vault/external_tests/storagepacker/storagepacker_sharding_test.go new file mode 100644 index 0000000000000..3f44a1e9aeae3 --- /dev/null +++ b/vault/external_tests/storagepacker/storagepacker_sharding_test.go @@ -0,0 +1,149 @@ +package storagepacker + +import ( + "context" + "crypto/rand" + "encoding/base64" + "fmt" + "os" + "testing" + "time" + + "github.com/golang/protobuf/ptypes" + consulapi "github.com/hashicorp/consul/api" + log "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/helper/logging" + "github.com/hashicorp/vault/helper/storagepacker" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/logical" + "github.com/hashicorp/vault/logical/plugin/pb" + "github.com/hashicorp/vault/physical/consul" + 
"github.com/hashicorp/vault/vault" + dockertest "gopkg.in/ory-am/dockertest.v2" +) + +func TestStoragePacker_Sharding(t *testing.T) { + var token string + addr := os.Getenv("CONSUL_HTTP_ADDR") + if addr == "" { + cid, connURL := consul.PrepareConsulTestContainer(t) + if cid != "" { + defer consul.CleanupConsulTestContainer(t, cid) + } + addr = connURL + token = dockertest.ConsulACLMasterToken + } + + conf := consulapi.DefaultConfig() + conf.Address = addr + conf.Token = token + consulClient, err := consulapi.NewClient(conf) + if err != nil { + t.Fatalf("err: %v", err) + } + + randPath := fmt.Sprintf("vault-%d/", time.Now().Unix()) + defer func() { + consulClient.KV().DeleteTree(randPath, nil) + }() + + logger := logging.NewVaultLogger(log.Trace) + + b, err := consul.NewConsulBackend(map[string]string{ + "address": conf.Address, + "path": randPath, + "max_parallel": "256", + "token": conf.Token, + }, logger) + if err != nil { + t.Fatalf("err: %s", err) + } + + randBytes := make([]byte, 100000, 100000) + n, err := rand.Read(randBytes) + if n != 100000 { + t.Fatalf("expected 100k bytes, read %d", n) + } + if err != nil { + t.Fatal(err) + } + randString := base64.StdEncoding.EncodeToString(randBytes) + + cluster := vault.NewTestCluster(t, &vault.CoreConfig{ + Physical: b, + }, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + NumCores: 1, + Logger: logger, + }) + + cluster.Start() + defer cluster.Cleanup() + + core := cluster.Cores[0] + vault.TestWaitActive(t, core.Core) + ctx := context.Background() + numEntries := 5000 + + storage := logical.NewLogicalStorage(core.UnderlyingStorage) + bucketStorageView := logical.NewStorageView(storage, "packer/buckets/") + packer, err := storagepacker.NewStoragePackerV2(ctx, &storagepacker.Config{ + BucketStorageView: bucketStorageView, + ConfigStorageView: logical.NewStorageView(storage, ""), + Logger: logger, + }) + if err != nil { + t.Fatal(err) + } + + protoSecret := &pb.Secret{ + InternalData: randString, + } + messageAsAny, err := ptypes.MarshalAny(protoSecret) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < numEntries; i++ { + if err := packer.PutItem(ctx, &storagepacker.Item{ + ID: fmt.Sprintf("%05d", i), + Message: messageAsAny, + }); err != nil { + t.Fatal(err) + } + } + + /* + buckets, err := logical.CollectKeys(ctx, bucketStorageView.SubView("v2/")) + if err != nil { + t.Fatal(err) + } + if len(buckets) == 256 { + t.Fatalf("%d", len(buckets)) + } + t.Log(len(buckets)) + t.Log(buckets) + */ + + // Create a new packer, then start looking for expected values + packer, err = storagepacker.NewStoragePackerV2(ctx, &storagepacker.Config{ + BucketStorageView: bucketStorageView, + ConfigStorageView: logical.NewStorageView(storage, ""), + Logger: logger, + }) + if err != nil { + t.Fatal(err) + } + + t.Log("created new packer, validating entries") + for i := 0; i < numEntries; i++ { + item, err := packer.GetItem(ctx, fmt.Sprintf("%05d", i)) + if err != nil { + t.Fatal(err) + } + if item == nil { + t.Fatal("nil item") + } + } + t.Log("validation complete") +} From 3b869ed10cdf3d8fe03528b1f8fcb3beda609177 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 13 Feb 2019 19:37:05 -0500 Subject: [PATCH 31/38] Re add some test output code --- .../storagepacker_sharding_test.go | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/vault/external_tests/storagepacker/storagepacker_sharding_test.go b/vault/external_tests/storagepacker/storagepacker_sharding_test.go index 3f44a1e9aeae3..c620f10a703d2 100644 --- 
a/vault/external_tests/storagepacker/storagepacker_sharding_test.go +++ b/vault/external_tests/storagepacker/storagepacker_sharding_test.go @@ -113,17 +113,12 @@ func TestStoragePacker_Sharding(t *testing.T) { } } - /* - buckets, err := logical.CollectKeys(ctx, bucketStorageView.SubView("v2/")) - if err != nil { - t.Fatal(err) - } - if len(buckets) == 256 { - t.Fatalf("%d", len(buckets)) - } - t.Log(len(buckets)) - t.Log(buckets) - */ + buckets, err := logical.CollectKeys(ctx, bucketStorageView.SubView("v2/")) + if err != nil { + t.Fatal(err) + } + t.Log(len(buckets)) + t.Log(buckets) // Create a new packer, then start looking for expected values packer, err = storagepacker.NewStoragePackerV2(ctx, &storagepacker.Config{ From 8bd03b7d9bca86e3ac0ed5cf52b904966e73be3b Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 13 Feb 2019 19:47:58 -0500 Subject: [PATCH 32/38] Relevel some debugging --- helper/storagepacker/storagepacker_v2.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 708eb04e00fbb..f20547eefdd40 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -316,10 +316,10 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, } shards[shardKey] = shardedBucket // If it was equal we'd be locked already - s.Logger.Debug("created shard", "shard_key", shardKey) + s.Logger.Trace("created shard", "shard_key", shardKey) // Don't try to lock the same lock twice in case it hashes that way if _, ok := shardLocks[lock]; !ok { - s.Logger.Debug("locking lock", "shard_key", shardKey) + s.Logger.Trace("locking lock", "shard_key", shardKey) lock.Lock() shardLocks[lock] = struct{}{} } @@ -356,7 +356,7 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, } } for k, v := range shards { - s.Logger.Debug("storing bucket", "shard", k) + s.Logger.Trace("storing bucket", "shard", k) if err := s.storeBucket(ctx, v, false); err != nil { s.Logger.Debug("encountered error", "shard", k) retErr = multierror.Append(retErr, err) @@ -376,7 +376,6 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, s.Logger.Debug("updating cache") s.bucketsCacheLock.Lock() { - s.Logger.Debug("in locked section") for _, v := range shards { s.bucketsCache.Insert(s.GetCacheKey(v.Key), v) } From aa2ccf6a3600fe1d98d2139b5d5add1bf38c91c0 Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Wed, 13 Feb 2019 19:48:45 -0500 Subject: [PATCH 33/38] Remove some debugging --- helper/storagepacker/storagepacker_v2.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index f20547eefdd40..1927a59e3080a 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -402,8 +402,6 @@ func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket, return nil } - s.Logger.Trace("number of items in bucket", "num_items", len(bucket.ItemMap)) - marshaledBucket, err := proto.Marshal(bucket.Bucket) if err != nil { return errwrap.Wrapf("failed to marshal bucket: {{err}}", err) @@ -416,8 +414,6 @@ func (s *StoragePackerV2) storeBucket(ctx context.Context, bucket *LockedBucket, return errwrap.Wrapf("failed to compress packed bucket: {{err}}", err) } - s.Logger.Trace("size of compressed bucket", "size", len(compressedBucket)) - // Store the compressed value err = 
s.BucketStorageView.Put(ctx, &logical.StorageEntry{ Key: bucket.Key, From d03aadaeabfc8bd88279881635f799898cd2db9a Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Mon, 25 Mar 2019 10:42:15 -0400 Subject: [PATCH 34/38] remove unneeded code and move a cleanup code line outside the loop (#6440) --- helper/storagepacker/storagepacker_v2.go | 6 ++-- helper/strutil/strutil.go | 44 ------------------------ vault/identity_store_util.go | 4 +-- 3 files changed, 6 insertions(+), 48 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 1927a59e3080a..367db1012a146 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -289,9 +289,10 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, if lock != bucketLock { lock.Unlock() } - // Empty the map - shardLocks = make(map[*locksutil.LockEntry]struct{}, 32) } + + // Empty the map + shardLocks = make(map[*locksutil.LockEntry]struct{}, 32) }() } @@ -390,6 +391,7 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, cleanupCache() } } + s.bucketsCacheLock.Unlock() return retErr.ErrorOrNil() diff --git a/helper/strutil/strutil.go b/helper/strutil/strutil.go index dbfb68c9ecbe7..3b54b720b883f 100644 --- a/helper/strutil/strutil.go +++ b/helper/strutil/strutil.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "sort" - "strconv" "strings" "github.com/hashicorp/errwrap" @@ -365,49 +364,6 @@ func AppendIfMissing(slice []string, i string) []string { return append(slice, i) } -// BitMaskedIndex returns the integer value formed from the given number of most -// significant bits of a given byte slice. -func BitMaskedIndex(input []byte, bitCount int) (int64, error) { - switch { - case len(input) == 0: - return -1, fmt.Errorf("input length is zero") - case bitCount <= 0: - return -1, fmt.Errorf("bit count zero or negative") - case bitCount > len(input)*8: - return -1, fmt.Errorf("input is shorter for the given bit count") - } - - if bitCount < 8 { - return int64(uint8(input[0]) >> uint8(8-bitCount)), nil - } - - decimalVal := int64(uint8(input[0])) - input = input[1:] - bitCount -= 8 - - for bitCount > 8 { - decimalVal = decimalVal*256 + int64(int(input[0])) - bitCount -= 8 - input = input[1:] - } - - decimalVal = decimalVal << uint8(bitCount) - decimalVal += int64(uint8(input[0]) >> uint8(8-bitCount)) - - return decimalVal, nil -} - -// BitMaskedIndexHex returnes the hex value formed from the given number of -// most significant bits of a given byte slice. -func BitMaskedIndexHex(input []byte, bitCount int) (string, error) { - index, err := BitMaskedIndex(input, bitCount) - if err != nil { - return "", err - } - // Convert the value to hex - return strconv.FormatInt(index, 16), nil -} - // MergeSlices adds an arbitrary number of slices together, uniquely func MergeSlices(args ...[]string) []string { all := map[string]struct{}{} diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 76b2400a1c84f..a61148e5a112e 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -270,7 +270,7 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { // Group's namespace doesn't exist anymore but the group // from the namespace still exists. 
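On the strutil deletions above: BitMaskedIndex returned the integer formed by the top N bits of a byte slice, and the v2 packer no longer needs it because it reads shard keys straight off the leading hex digits of the hashed item ID. A quick sketch of that equivalence for the 4-bit case; the digest bytes are illustrative.

package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Illustrative 4-byte prefix of a hashed item ID.
	digest := []byte{0x3a, 0x94, 0xc1, 0x7e}

	// Top 4 bits as an integer, the way the removed BitMaskedIndex computed it.
	topNibble := digest[0] >> 4
	fmt.Printf("top 4 bits as int: %d\n", topNibble) // 3

	// The same information read off the hex encoding: its first character.
	hexForm := hex.EncodeToString(digest)
	fmt.Printf("first hex digit:   %s\n", hexForm[:1]) // "3"

	// One hex digit encodes exactly 4 bits, so slicing BucketShardBits/4 hex
	// characters selects the same shard the bit-masked integer would have.
	fmt.Println(fmt.Sprintf("%x", topNibble) == hexForm[:1]) // true
}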
i.logger.Warn("deleting group and its any existing aliases", "name", group.Name, "namespace_id", group.NamespaceID) - err = i.groupPacker.DeleteItem(group.ID) + err = i.groupPacker.DeleteItem(ctx, group.ID) if err != nil { return err } @@ -446,7 +446,7 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { // Entity's namespace doesn't exist anymore but the // entity from the namespace still exists. i.logger.Warn("deleting entity and its any existing aliases", "name", entity.Name, "namespace_id", entity.NamespaceID) - err = i.entityPacker.DeleteItem(entity.ID) + err = i.entityPacker.DeleteItem(ctx, entity.ID) if err != nil { return err } From 856add82c098972b190f1978cecbcf018d7153d5 Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Tue, 26 Mar 2019 12:43:10 -0400 Subject: [PATCH 35/38] Fix tests --- helper/storagepacker/storagepacker_v2.go | 2 +- helper/storagepacker/storagepacker_v2_test.go | 30 ++++++++++++------- .../external_tests/identity/identity_test.go | 9 ++++-- vault/identity_store_groups_test.go | 4 ++- vault/identity_store_test.go | 4 ++- 5 files changed, 32 insertions(+), 17 deletions(-) diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 367db1012a146..f5a49c2da28ff 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -33,7 +33,7 @@ var ( ) type Config struct { - // BucketStorageView is the storage to be used by all the buckets +// BucketStorageView is the storage to be used by all the buckets BucketStorageView *logical.StorageView `json:"-"` // ConfigStorageView is the storage to store config info diff --git a/helper/storagepacker/storagepacker_v2_test.go b/helper/storagepacker/storagepacker_v2_test.go index 48ec063fe11f7..48f95a6abe1ae 100644 --- a/helper/storagepacker/storagepacker_v2_test.go +++ b/helper/storagepacker/storagepacker_v2_test.go @@ -10,6 +10,7 @@ import ( log "github.com/hashicorp/go-hclog" uuid "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/helper/identity" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/logical" ) @@ -24,12 +25,14 @@ func getStoragePacker(tb testing.TB) *StoragePackerV2 { if err != nil { tb.Fatal(err) } - return storagePacker + return storagePacker.(*StoragePackerV2) } func BenchmarkStoragePackerV2(b *testing.B) { storagePacker := getStoragePacker(b) + ctx := namespace.RootContext(nil) + for i := 0; i < b.N; i++ { itemID, err := uuid.GenerateUUID() if err != nil { @@ -40,12 +43,12 @@ func BenchmarkStoragePackerV2(b *testing.B) { ID: itemID, } - err = storagePacker.PutItem(item) + err = storagePacker.PutItem(ctx, item) if err != nil { b.Fatal(err) } - fetchedItem, err := storagePacker.GetItem(itemID) + fetchedItem, err := storagePacker.GetItem(ctx, itemID) if err != nil { b.Fatal(err) } @@ -58,12 +61,12 @@ func BenchmarkStoragePackerV2(b *testing.B) { b.Fatalf("bad: item ID; expected: %q\n actual: %q", item.ID, fetchedItem.ID) } - err = storagePacker.DeleteItem(item.ID) + err = storagePacker.DeleteItem(ctx, item.ID) if err != nil { b.Fatal(err) } - fetchedItem, err = storagePacker.GetItem(item.ID) + fetchedItem, err = storagePacker.GetItem(ctx, item.ID) if err != nil { b.Fatal(err) } @@ -81,13 +84,15 @@ func TestStoragePackerV2(t *testing.T) { ID: "item1", } - err := storagePacker.PutItem(item1) + ctx := namespace.RootContext(nil) + + err := storagePacker.PutItem(ctx, item1) if err != nil { t.Fatal(err) } // Verify that it can be read - fetchedItem, err := storagePacker.GetItem(item1.ID) 
+ fetchedItem, err := storagePacker.GetItem(ctx, item1.ID) if err != nil { t.Fatal(err) } @@ -100,13 +105,13 @@ func TestStoragePackerV2(t *testing.T) { } // Delete item1 - err = storagePacker.DeleteItem(item1.ID) + err = storagePacker.DeleteItem(ctx, item1.ID) if err != nil { t.Fatal(err) } // Check that the deletion was successful - fetchedItem, err = storagePacker.GetItem(item1.ID) + fetchedItem, err = storagePacker.GetItem(ctx, item1.ID) if err != nil { t.Fatal(err) } @@ -154,7 +159,10 @@ func TestStoragePackerV2_SerializeDeserializeComplexItem_Version1(t *testing.T) if err != nil { t.Fatal(err) } - err = storagePacker.PutItem(&Item{ + + ctx := namespace.RootContext(nil) + + err = storagePacker.PutItem(ctx, &Item{ ID: entity.ID, Message: marshaledEntity, }) @@ -162,7 +170,7 @@ func TestStoragePackerV2_SerializeDeserializeComplexItem_Version1(t *testing.T) t.Fatal(err) } - itemFetched, err := storagePacker.GetItem(entity.ID) + itemFetched, err := storagePacker.GetItem(ctx, entity.ID) if err != nil { t.Fatal(err) } diff --git a/vault/external_tests/identity/identity_test.go b/vault/external_tests/identity/identity_test.go index 3483788b0f679..6e4587fa8beb3 100644 --- a/vault/external_tests/identity/identity_test.go +++ b/vault/external_tests/identity/identity_test.go @@ -6,6 +6,7 @@ import ( log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/builtin/credential/ldap" + "github.com/hashicorp/vault/helper/namespace" vaulthttp "github.com/hashicorp/vault/http" "github.com/hashicorp/vault/logical" "github.com/hashicorp/vault/vault" @@ -235,7 +236,9 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) { // Remove its member entities group.MemberEntityIDs = nil - err = identityStore.UpsertGroup(group, true) + ctx := namespace.RootContext(nil) + + err = identityStore.UpsertGroup(ctx, group, true) if err != nil { t.Fatal(err) } @@ -256,7 +259,7 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) { // Remove its member entities group.MemberEntityIDs = nil - err = identityStore.UpsertGroup(group, true) + err = identityStore.UpsertGroup(ctx, group, true) if err != nil { t.Fatal(err) } @@ -277,7 +280,7 @@ func TestIdentityStore_Integ_GroupAliases(t *testing.T) { // Remove its member entities group.MemberEntityIDs = nil - err = identityStore.UpsertGroup(group, true) + err = identityStore.UpsertGroup(ctx, group, true) if err != nil { t.Fatal(err) } diff --git a/vault/identity_store_groups_test.go b/vault/identity_store_groups_test.go index d3043bf8a29f1..0df183a78d70b 100644 --- a/vault/identity_store_groups_test.go +++ b/vault/identity_store_groups_test.go @@ -40,8 +40,10 @@ func TestIdentityStore_GroupEntityMembershipUpgrade(t *testing.T) { // Manually add an invalid entity as the group's member group.MemberEntityIDs = []string{"invalidentityid"} + ctx := namespace.RootContext(nil) + // Persist the group - err = c.identityStore.UpsertGroupInTxn(txn, group, true) + err = c.identityStore.UpsertGroupInTxn(ctx, txn, group, true) if err != nil { t.Fatal(err) } diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index 347c67b5b4cd8..433b540798129 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -86,7 +86,9 @@ func TestIdentityStore_UnsealingWhenConflictingAliasNames(t *testing.T) { ID: entity2.ID, Message: entity2Any, } - if err = c.identityStore.entityPacker.PutItem(item); err != nil { + + ctx := namespace.RootContext(nil) + if err = c.identityStore.entityPacker.PutItem(ctx, item); err != nil { 
t.Fatal(err) } From 7bb3528560d7743426e22bac8acd9c8850f0126b Mon Sep 17 00:00:00 2001 From: Jeff Mitchell Date: Tue, 2 Apr 2019 10:14:09 -0400 Subject: [PATCH 36/38] Update generated proto --- helper/forwarding/types.pb.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index 09813b6423a55..e7b104c6a7db3 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -23,7 +23,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type Request struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - //uint64 id = 1; + // uint64 id = 1; Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` Url *URL `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"` HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -115,12 +115,12 @@ type URL struct { Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` // This isn't needed now but might be in the future, so we'll skip the // number to keep the ordering in net/url - //UserInfo user = 3; + // UserInfo user = 3; Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` // This also isn't needed right now, but we'll reserve the number - //bool force_query = 7; + // bool force_query = 7; RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -244,7 +244,7 @@ func (m *HeaderEntry) GetValues() []string { type Response struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - //uint64 id = 1; + // uint64 id = 1; StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` // Added in 0.6.2 to ensure that the content-type is set appropriately, as From 3fa22d46b7f9da4e819cd6c10765a5abb206165b Mon Sep 17 00:00:00 2001 From: vishalnayak Date: Thu, 9 May 2019 10:42:25 -0400 Subject: [PATCH 37/38] fix tests --- .../external_tests/storagepacker/legacy_storagepacker.go | 8 ++++---- .../storagepacker/storagepacker_sharding_test.go | 4 +++- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/vault/external_tests/storagepacker/legacy_storagepacker.go b/vault/external_tests/storagepacker/legacy_storagepacker.go index 396be569a6b7d..b852e72ae0ba6 100644 --- a/vault/external_tests/storagepacker/legacy_storagepacker.go +++ b/vault/external_tests/storagepacker/legacy_storagepacker.go @@ -205,7 +205,7 @@ func (s *LegacyStoragePacker) DeleteItem(ctx context.Context, itemID string) err bucket.Items = append(bucket.Items[:foundIdx], bucket.Items[foundIdx+1:]...) 
// Persist bucket entry only if there is an update - err = s.putBucket(ctx, &sp2.LockedBucket{Bucket: &bucket}) + err = s.PutBucket(ctx, &sp2.LockedBucket{Bucket: &bucket}) if err != nil { return err } @@ -215,7 +215,7 @@ func (s *LegacyStoragePacker) DeleteItem(ctx context.Context, itemID string) err } // Put stores a packed bucket entry -func (s *LegacyStoragePacker) putBucket(ctx context.Context, bucket *sp2.LockedBucket) error { +func (s *LegacyStoragePacker) PutBucket(ctx context.Context, bucket *sp2.LockedBucket) error { if bucket == nil { return fmt.Errorf("nil bucket entry") } @@ -294,7 +294,7 @@ func (s *LegacyStoragePacker) PutItem(ctx context.Context, item *sp2.Item) error bucketKey := s.BucketKey(item.ID) bucket := &sp2.Bucket{ - Key: bucketPath, + Key: bucketKey, } // In this case, we persist the storage entry regardless of the read @@ -336,7 +336,7 @@ func (s *LegacyStoragePacker) PutItem(ctx context.Context, item *sp2.Item) error } } - return s.putBucket(ctx, &sp2.LockedBucket{Bucket: bucket}) + return s.PutBucket(ctx, &sp2.LockedBucket{Bucket: bucket}) } // NewLegacyStoragePacker creates a new storage packer for a given view diff --git a/vault/external_tests/storagepacker/storagepacker_sharding_test.go b/vault/external_tests/storagepacker/storagepacker_sharding_test.go index 6b0bf1cf11ff1..bfadc0508a798 100644 --- a/vault/external_tests/storagepacker/storagepacker_sharding_test.go +++ b/vault/external_tests/storagepacker/storagepacker_sharding_test.go @@ -13,7 +13,9 @@ import ( consulapi "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/storagepacker" + "github.com/hashicorp/vault/helper/testhelpers/consul" vaulthttp "github.com/hashicorp/vault/http" + physConsul "github.com/hashicorp/vault/physical/consul" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/sdk/plugin/pb" @@ -44,7 +46,7 @@ func TestStoragePacker_Sharding(t *testing.T) { logger := logging.NewVaultLogger(log.Trace) - b, err := consul.NewConsulBackend(map[string]string{ + b, err := physConsul.NewConsulBackend(map[string]string{ "address": conf.Address, "path": randPath, "max_parallel": "256", From 0e08c113af081133fb3b88f7d7bac8ca5aa494b1 Mon Sep 17 00:00:00 2001 From: Vishal Nayak Date: Fri, 10 May 2019 12:37:18 -0400 Subject: [PATCH 38/38] StoragePackerV2: Item storage from proto.Any to []byte (#6715) * Change item storage from proto.Any to []byte * address review feedback --- helper/forwarding/types.pb.go | 8 +- helper/storagepacker/storagepacker_v2.go | 21 ++- helper/storagepacker/types.pb.go | 65 +++++---- helper/storagepacker/types.proto | 7 +- .../storagepacker_sharding_test.go | 10 +- vault/identity_store.go | 23 ++-- vault/identity_store_aliases.go | 9 +- vault/identity_store_entities.go | 8 +- vault/identity_store_util.go | 130 ++++++++++++++---- 9 files changed, 181 insertions(+), 100 deletions(-) diff --git a/helper/forwarding/types.pb.go b/helper/forwarding/types.pb.go index 4ed46ba405e81..d17c4f1b4d755 100644 --- a/helper/forwarding/types.pb.go +++ b/helper/forwarding/types.pb.go @@ -23,7 +23,7 @@ const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type Request struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` Url *URL `protobuf:"bytes,3,opt,name=url,proto3" 
json:"url,omitempty"` HeaderEntries map[string]*HeaderEntry `protobuf:"bytes,4,rep,name=header_entries,json=headerEntries,proto3" json:"header_entries,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -115,12 +115,12 @@ type URL struct { Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` // This isn't needed now but might be in the future, so we'll skip the // number to keep the ordering in net/url - // UserInfo user = 3; + //UserInfo user = 3; Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` RawPath string `protobuf:"bytes,6,opt,name=raw_path,json=rawPath,proto3" json:"raw_path,omitempty"` // This also isn't needed right now, but we'll reserve the number - // bool force_query = 7; + //bool force_query = 7; RawQuery string `protobuf:"bytes,8,opt,name=raw_query,json=rawQuery,proto3" json:"raw_query,omitempty"` Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -244,7 +244,7 @@ func (m *HeaderEntry) GetValues() []string { type Response struct { // Not used right now but reserving in case it turns out that streaming // makes things more economical on the gRPC side - // uint64 id = 1; + //uint64 id = 1; StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"` Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"` // Added in 0.6.2 to ensure that the content-type is set appropriately, as diff --git a/helper/storagepacker/storagepacker_v2.go b/helper/storagepacker/storagepacker_v2.go index 6fba73f6443d3..8630045ea448e 100644 --- a/helper/storagepacker/storagepacker_v2.go +++ b/helper/storagepacker/storagepacker_v2.go @@ -12,7 +12,6 @@ import ( radix "github.com/armon/go-radix" "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" multierror "github.com/hashicorp/go-multierror" @@ -312,7 +311,7 @@ func (s *StoragePackerV2) shardBucket(ctx context.Context, bucket *LockedBucket, LockEntry: lock, Bucket: &Bucket{ Key: fmt.Sprintf("%s/%s", bucket.Key, shardKey), - ItemMap: make(map[string]*any.Any), + ItemMap: make(map[string][]byte), }, } shards[shardKey] = shardedBucket @@ -472,12 +471,12 @@ func (s *LockedBucket) upsert(item *Item) error { } if s.ItemMap == nil { - s.ItemMap = make(map[string]*any.Any) + s.ItemMap = make(map[string][]byte) } itemHash := GetItemIDHash(item.ID) - s.ItemMap[itemHash] = item.Message + s.ItemMap[itemHash] = item.Data return nil } @@ -591,14 +590,14 @@ func (s *StoragePackerV2) GetItem(ctx context.Context, itemID string) (*Item, er itemHash := GetItemIDHash(itemID) - item, ok := bucket.ItemMap[itemHash] + data, ok := bucket.ItemMap[itemHash] if !ok { return nil, nil } return &Item{ - ID: itemID, - Message: item, + ID: itemID, + Data: data, }, nil } @@ -612,6 +611,14 @@ func (s *StoragePackerV2) PutItem(ctx context.Context, item *Item) error { return fmt.Errorf("missing ID in item") } + if item.Data == nil { + return fmt.Errorf("missing data in item") + } + + if item.Message != nil { + return fmt.Errorf("'Message' is deprecated; use 'Data' instead") + } + // Get the bucket key bucketKey := s.BucketStorageKeyForItemID(item.ID) cacheKey := s.GetCacheKey(bucketKey) diff --git a/helper/storagepacker/types.pb.go 
b/helper/storagepacker/types.pb.go index 8d42ec076de52..61a94977bc4c5 100644 --- a/helper/storagepacker/types.pb.go +++ b/helper/storagepacker/types.pb.go @@ -29,8 +29,11 @@ type Item struct { // described above, the caller *must not* rely on this value to be // consistent with what they passed in. ID string `sentinel:"" protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // message is the contents of the item - Message *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Message holds the contents of the item + // Deprecated: Use 'Data' instead + Message *any.Any `sentinel:"" protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // Data holds the contents of the item. Used in storage packer v2. + Data []byte `sentinel:"" protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -75,6 +78,13 @@ func (m *Item) GetMessage() *any.Any { return nil } +func (m *Item) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + // Bucket is a construct to hold multiple items within itself. This // abstraction contains multiple buckets of the same kind within itself and // shares amont them the items that get inserted. When the bucket as a whole @@ -87,10 +97,10 @@ type Bucket struct { // Items holds the items contained within this bucket. Used by v1. Items []*Item `sentinel:"" protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"` // ItemMap stores a mapping of item ID to message. Used by v2. - ItemMap map[string]*any.Any `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ItemMap map[string][]byte `sentinel:"" protobuf:"bytes,3,rep,name=item_map,json=itemMap,proto3" json:"item_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Bucket) Reset() { *m = Bucket{} } @@ -132,7 +142,7 @@ func (m *Bucket) GetItems() []*Item { return nil } -func (m *Bucket) GetItemMap() map[string]*any.Any { +func (m *Bucket) GetItemMap() map[string][]byte { if m != nil { return m.ItemMap } @@ -142,29 +152,30 @@ func (m *Bucket) GetItemMap() map[string]*any.Any { func init() { proto.RegisterType((*Item)(nil), "storagepacker.Item") proto.RegisterType((*Bucket)(nil), "storagepacker.Bucket") - proto.RegisterMapType((map[string]*any.Any)(nil), "storagepacker.Bucket.ItemMapEntry") + proto.RegisterMapType((map[string][]byte)(nil), "storagepacker.Bucket.ItemMapEntry") } func init() { proto.RegisterFile("helper/storagepacker/types.proto", fileDescriptor_c0e98c66c4f51b7f) } var fileDescriptor_c0e98c66c4f51b7f = []byte{ - // 276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xcf, 0x4b, 0xc3, 0x30, - 0x14, 0xc7, 0x69, 0xeb, 0x36, 0x7d, 0x53, 0x91, 0xe8, 0xa1, 0xee, 0x54, 0x7a, 0xaa, 0x1e, 0x12, - 0x9c, 0x17, 0x11, 0x3c, 0x38, 0x50, 0xf0, 0x20, 0x48, 0x8f, 0x5e, 0x24, 0xed, 0x9e, 0x6d, 0xe8, - 0x8f, 0x84, 0x24, 0x1d, 0xf4, 0x1f, 0xf5, 0xef, 0x91, 0x36, 0x0e, 0x9c, 0x0c, 0x6f, 0x2f, 0x7c, - 0x3f, 0xf9, 0xe4, 0x1b, 0x1e, 0x44, 0x25, 0xd6, 0x0a, 0x35, 
0x33, 0x56, 0x6a, 0x5e, 0xa0, 0xe2, - 0x79, 0x85, 0x9a, 0xd9, 0x5e, 0xa1, 0xa1, 0x4a, 0x4b, 0x2b, 0xc9, 0xc9, 0x4e, 0xb4, 0xb8, 0x2c, - 0xa4, 0x2c, 0x6a, 0x64, 0x63, 0x98, 0x75, 0x9f, 0x8c, 0xb7, 0xbd, 0x23, 0xe3, 0x67, 0x38, 0x78, - 0xb1, 0xd8, 0x90, 0x53, 0xf0, 0xc5, 0x3a, 0xf4, 0x22, 0x2f, 0x39, 0x4a, 0x7d, 0xb1, 0x26, 0x14, - 0x66, 0x0d, 0x1a, 0xc3, 0x0b, 0x0c, 0xfd, 0xc8, 0x4b, 0xe6, 0xcb, 0x0b, 0xea, 0x24, 0x74, 0x2b, - 0xa1, 0x8f, 0x6d, 0x9f, 0x6e, 0xa1, 0xf8, 0xcb, 0x83, 0xe9, 0xaa, 0xcb, 0x2b, 0xb4, 0xe4, 0x0c, - 0x82, 0x0a, 0xfb, 0x1f, 0xd7, 0x30, 0x92, 0x2b, 0x98, 0x08, 0x8b, 0x8d, 0x09, 0xfd, 0x28, 0x48, - 0xe6, 0xcb, 0x73, 0xba, 0x53, 0x8f, 0x0e, 0x05, 0x52, 0x47, 0x90, 0x07, 0x38, 0x1c, 0x86, 0x8f, - 0x86, 0xab, 0x30, 0x18, 0xe9, 0xf8, 0x0f, 0xed, 0x5e, 0x19, 0x2f, 0xbd, 0x72, 0xf5, 0xd4, 0x5a, - 0xdd, 0xa7, 0x33, 0xe1, 0x4e, 0x8b, 0x37, 0x38, 0xfe, 0x1d, 0xec, 0xe9, 0x72, 0x0d, 0x93, 0x0d, - 0xaf, 0xbb, 0xff, 0xbf, 0xe5, 0x90, 0x7b, 0xff, 0xce, 0x5b, 0xdd, 0xbc, 0xb3, 0x42, 0xd8, 0xb2, - 0xcb, 0x68, 0x2e, 0x1b, 0x56, 0x72, 0x53, 0x8a, 0x5c, 0x6a, 0xc5, 0x36, 0xbc, 0xab, 0x2d, 0xdb, - 0xb7, 0x89, 0x6c, 0x3a, 0xba, 0x6e, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x46, 0x9d, 0x8a, 0xcb, - 0xa8, 0x01, 0x00, 0x00, + // 289 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x3d, 0x4f, 0xf3, 0x30, + 0x14, 0x85, 0xe5, 0xa4, 0x1f, 0xef, 0xeb, 0x16, 0x84, 0x4c, 0x87, 0xd0, 0x29, 0xea, 0x14, 0x16, + 0x5b, 0x94, 0x05, 0x55, 0x62, 0xa0, 0x12, 0x03, 0x03, 0x8b, 0xc7, 0x2e, 0xc8, 0x49, 0x2e, 0x89, + 0x95, 0x0f, 0x5b, 0xb6, 0x53, 0x29, 0xff, 0x8e, 0x9f, 0x86, 0x12, 0x53, 0x89, 0xa2, 0x6e, 0xe7, + 0xea, 0x3c, 0x3e, 0x3e, 0xf6, 0xc5, 0x71, 0x09, 0xb5, 0x06, 0xc3, 0xac, 0x53, 0x46, 0x14, 0xa0, + 0x45, 0x56, 0x81, 0x61, 0xae, 0xd7, 0x60, 0xa9, 0x36, 0xca, 0x29, 0x72, 0x75, 0x66, 0xad, 0xef, + 0x0a, 0xa5, 0x8a, 0x1a, 0xd8, 0x68, 0xa6, 0xdd, 0x27, 0x13, 0x6d, 0xef, 0xc9, 0xcd, 0x01, 0x4f, + 0xde, 0x1c, 0x34, 0xe4, 0x1a, 0x07, 0x32, 0x8f, 0x50, 0x8c, 0x92, 0xff, 0x3c, 0x90, 0x39, 0xa1, + 0x78, 0xde, 0x80, 0xb5, 0xa2, 0x80, 0x28, 0x88, 0x51, 0xb2, 0xd8, 0xae, 0xa8, 0x0f, 0xa1, 0xa7, + 0x10, 0xfa, 0xd2, 0xf6, 0xfc, 0x04, 0x11, 0x82, 0x27, 0xb9, 0x70, 0x22, 0x0a, 0x63, 0x94, 0x2c, + 0xf9, 0xa8, 0x37, 0x5f, 0x08, 0xcf, 0xf6, 0x5d, 0x56, 0x81, 0x23, 0x37, 0x38, 0xac, 0xa0, 0xff, + 0xc9, 0x1f, 0x24, 0xb9, 0xc7, 0x53, 0xe9, 0xa0, 0xb1, 0x51, 0x10, 0x87, 0xc9, 0x62, 0x7b, 0x4b, + 0xcf, 0x2a, 0xd3, 0xa1, 0x14, 0xf7, 0x04, 0x79, 0xc6, 0xff, 0x06, 0xf1, 0xd1, 0x08, 0x1d, 0x85, + 0x23, 0xbd, 0xf9, 0x43, 0xfb, 0x5b, 0xc6, 0x43, 0xef, 0x42, 0xbf, 0xb6, 0xce, 0xf4, 0x7c, 0x2e, + 0xfd, 0xb4, 0xde, 0xe1, 0xe5, 0x6f, 0xe3, 0x42, 0x97, 0x15, 0x9e, 0x1e, 0x45, 0xdd, 0xf9, 0xa7, + 0x2e, 0xb9, 0x1f, 0x76, 0xc1, 0x13, 0xda, 0x3f, 0x1c, 0x58, 0x21, 0x5d, 0xd9, 0xa5, 0x34, 0x53, + 0x0d, 0x2b, 0x85, 0x2d, 0x65, 0xa6, 0x8c, 0x66, 0x47, 0xd1, 0xd5, 0x8e, 0x5d, 0xda, 0x43, 0x3a, + 0x1b, 0x3f, 0xe8, 0xf1, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x11, 0x50, 0x1a, 0x26, 0xa6, 0x01, 0x00, + 0x00, } diff --git a/helper/storagepacker/types.proto b/helper/storagepacker/types.proto index 4edfaf4f85721..189cd8e5aa266 100644 --- a/helper/storagepacker/types.proto +++ b/helper/storagepacker/types.proto @@ -14,8 +14,11 @@ message Item { // described above, the caller *must not* rely on this value to be // consistent with what they passed in. 
string id = 1; - // message is the contents of the item + // Message holds the contents of the item + // Deprecated: Use 'Data' instead google.protobuf.Any message = 2; + // Data holds the contents of the item. Used in storage packer v2. + bytes data = 3; } // Bucket is a construct to hold multiple items within itself. This @@ -30,5 +33,5 @@ message Bucket { // Items holds the items contained within this bucket. Used by v1. repeated Item items = 2; // ItemMap stores a mapping of item ID to message. Used by v2. - map item_map = 3; + map item_map = 3; } diff --git a/vault/external_tests/storagepacker/storagepacker_sharding_test.go b/vault/external_tests/storagepacker/storagepacker_sharding_test.go index bfadc0508a798..5205f678956eb 100644 --- a/vault/external_tests/storagepacker/storagepacker_sharding_test.go +++ b/vault/external_tests/storagepacker/storagepacker_sharding_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/proto" consulapi "github.com/hashicorp/consul/api" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/helper/storagepacker" @@ -93,18 +93,18 @@ func TestStoragePacker_Sharding(t *testing.T) { t.Fatal(err) } - protoSecret := &pb.Secret{ + secret := &pb.Secret{ InternalData: randString, } - messageAsAny, err := ptypes.MarshalAny(protoSecret) + secretProto, err := proto.Marshal(secret) if err != nil { t.Fatal(err) } for i := 0; i < numEntries; i++ { if err := packer.PutItem(ctx, &storagepacker.Item{ - ID: fmt.Sprintf("%05d", i), - Message: messageAsAny, + ID: fmt.Sprintf("%05d", i), + Data: secretProto, }); err != nil { t.Fatal(err) } diff --git a/vault/identity_store.go b/vault/identity_store.go index 75d007c765bf3..127e6dacf01c2 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -177,7 +177,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) items = append(items, bucket.Items...) for id, message := range bucket.ItemMap { - items = append(items, &storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Data: message}) } for _, item := range items { entity, err := i.parseEntityFromBucketItem(ctx, item) @@ -246,7 +246,7 @@ func (i *IdentityStore) Invalidate(ctx context.Context, key string) { items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) items = append(items, bucket.Items...) for id, message := range bucket.ItemMap { - items = append(items, &storagepacker.Item{ID: id, Message: message}) + items = append(items, &storagepacker.Item{ID: id, Data: message}) } for _, item := range items { group, err := i.parseGroupFromBucketItem(item) @@ -302,8 +302,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto persistNeeded := false - var entity identity.Entity - err := ptypes.UnmarshalAny(item.Message, &entity) + entity, err := i.decodeEntity(item) if err != nil { // If we encounter an error, it would mean that the format of the // entity is an older one. 
Try decoding using the older format and if @@ -349,7 +348,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto entity.BucketKey = i.entityPacker.BucketKey(entity.ID) - pN, err := parseExtraEntityFromBucket(ctx, i, &entity) + pN, err := parseExtraEntityFromBucket(ctx, i, entity) if err != nil { return nil, err } @@ -358,16 +357,11 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto } if persistNeeded && !i.core.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) { - entityAsAny, err := ptypes.MarshalAny(&entity) + item, err := i.encodeEntity(entity) if err != nil { return nil, err } - item := &storagepacker.Item{ - ID: entity.ID, - Message: entityAsAny, - } - // Store the entity with new format err = i.entityPacker.PutItem(ctx, item) if err != nil { @@ -379,7 +373,7 @@ func (i *IdentityStore) parseEntityFromBucketItem(ctx context.Context, item *sto entity.NamespaceID = namespace.RootNamespaceID } - return &entity, nil + return entity, nil } func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*identity.Group, error) { @@ -387,8 +381,7 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide return nil, fmt.Errorf("nil item") } - var group identity.Group - err := ptypes.UnmarshalAny(item.Message, &group) + group, err := i.decodeGroup(item) if err != nil { return nil, errwrap.Wrapf("failed to decode group from storage bucket item: {{err}}", err) } @@ -399,7 +392,7 @@ func (i *IdentityStore) parseGroupFromBucketItem(item *storagepacker.Item) (*ide group.BucketKey = i.groupPacker.BucketKey(group.ID) - return &group, nil + return group, nil } // entityByAliasFactors fetches the entity based on factors of alias, i.e mount diff --git a/vault/identity_store_aliases.go b/vault/identity_store_aliases.go index 97cd9a5cce385..e5944780c90a5 100644 --- a/vault/identity_store_aliases.go +++ b/vault/identity_store_aliases.go @@ -8,7 +8,6 @@ import ( "github.com/golang/protobuf/ptypes" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -396,15 +395,11 @@ func (i *IdentityStore) pathAliasIDDelete() framework.OperationFunc { return nil, err } - // Persist the entity object - entityAsAny, err := ptypes.MarshalAny(entity) + // Persist the entity + item, err := i.encodeEntity(entity) if err != nil { return nil, err } - item := &storagepacker.Item{ - ID: entity.ID, - Message: entityAsAny, - } err = i.entityPacker.PutItem(ctx, item) if err != nil { diff --git a/vault/identity_store_entities.go b/vault/identity_store_entities.go index aa02c1ddb6ed0..1bc6e978dfe8d 100644 --- a/vault/identity_store_entities.go +++ b/vault/identity_store_entities.go @@ -11,7 +11,6 @@ import ( memdb "github.com/hashicorp/go-memdb" "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/namespace" - "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/helper/strutil" @@ -724,15 +723,10 @@ func (i *IdentityStore) mergeEntity(ctx context.Context, txn *memdb.Txn, toEntit if persist && !isPerfSecondaryOrStandby { // Persist the entity which we are merging to - toEntityAsAny, err := ptypes.MarshalAny(toEntity) + item, err := i.encodeEntity(toEntity) if err != nil { return nil, err } - item := 
&storagepacker.Item{ - ID: toEntity.ID, - Message: toEntityAsAny, - } - err = i.entityPacker.PutItem(ctx, item) if err != nil { return nil, err diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index afc2d28e199b7..1da043a0f8f0a 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -8,6 +8,7 @@ import ( "sync" "sync/atomic" + proto "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" "github.com/hashicorp/errwrap" memdb "github.com/hashicorp/go-memdb" @@ -36,7 +37,7 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { } // Check for the legacy -> v2 upgrade case - upgradeLegacyStoragePacker := func(prefix string, packer storagepacker.StoragePacker) error { + upgradeLegacyStoragePacker := func(prefix string, packer storagepacker.StoragePacker, kind string) error { c.logger.Trace("checking for identity storagepacker upgrade", "prefix", prefix) bucketStorageView := logical.NewStorageView(c.identityStore.view, prefix+"buckets/") vals, err := bucketStorageView.List(ctx, "") @@ -75,6 +76,28 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { } // Set to the new prefix for _, item := range bucket.Items { + // Parse the entity or group from the Message (proto.Any) field + // and re-encode it as a proto message into the 'Data' field. + switch kind { + case "entity": + entity, err := c.identityStore.decodeEntity(item) + if err != nil { + return err + } + item, err = c.identityStore.encodeEntity(entity) + if err != nil { + return err + } + case "group": + group, err := c.identityStore.decodeGroup(item) + if err != nil { + return err + } + item, err = c.identityStore.encodeGroup(group) + if err != nil { + return err + } + } packer.PutItem(ctx, item) } } @@ -103,10 +126,10 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { return nil } - if err := upgradeLegacyStoragePacker(entityStoragePackerPrefix, c.identityStore.entityPacker); err != nil { + if err := upgradeLegacyStoragePacker(entityStoragePackerPrefix, c.identityStore.entityPacker, "entity"); err != nil { return err } - if err := upgradeLegacyStoragePacker(groupStoragePackerPrefix, c.identityStore.groupPacker); err != nil { + if err := upgradeLegacyStoragePacker(groupStoragePackerPrefix, c.identityStore.groupPacker, "group"); err != nil { return err } @@ -147,6 +170,74 @@ func (c *Core) loadIdentityStoreArtifacts(ctx context.Context) error { return loadFunc(ctx) } +func (i *IdentityStore) encodeEntity(entity *identity.Entity) (*storagepacker.Item, error) { + if entity == nil { + return nil, fmt.Errorf("nil entity") + } + + entityProto, err := proto.Marshal(entity) + if err != nil { + return nil, err + } + + return &storagepacker.Item{ + ID: entity.ID, + Data: entityProto, + }, nil +} + +func (i *IdentityStore) encodeGroup(group *identity.Group) (*storagepacker.Item, error) { + if group == nil { + return nil, fmt.Errorf("nil group") + } + + groupProto, err := proto.Marshal(group) + if err != nil { + return nil, err + } + + return &storagepacker.Item{ + ID: group.ID, + Data: groupProto, + }, nil +} + +func (i *IdentityStore) decodeGroup(item *storagepacker.Item) (*identity.Group, error) { + var group identity.Group + if item.Message != nil { + err := ptypes.UnmarshalAny(item.Message, &group) + if err != nil { + return nil, err + } + return &group, nil + } + + err := proto.Unmarshal(item.Data, &group) + if err != nil { + return nil, err + } + + return &group, nil +} + +func (i *IdentityStore) decodeEntity(item 
*storagepacker.Item) (*identity.Entity, error) { + var entity identity.Entity + if item.Message != nil { + err := ptypes.UnmarshalAny(item.Message, &entity) + if err != nil { + return nil, err + } + return &entity, nil + } + + err := proto.Unmarshal(item.Data, &entity) + if err != nil { + return nil, err + } + + return &entity, nil +} + func (i *IdentityStore) sanitizeName(name string) string { if i.disableLowerCasedNames { return name @@ -248,8 +339,8 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { // Need to check both map and Items in case it's during upgrading items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) items = append(items, bucket.Items...) - for id, message := range bucket.ItemMap { - items = append(items, &storagepacker.Item{ID: id, Message: message}) + for id, data := range bucket.ItemMap { + items = append(items, &storagepacker.Item{ID: id, Data: data}) } for _, item := range items { group, err := i.parseGroupFromBucketItem(item) @@ -424,8 +515,8 @@ func (i *IdentityStore) loadEntities(ctx context.Context) error { items := make([]*storagepacker.Item, 0, len(bucket.Items)+len(bucket.ItemMap)) items = append(items, bucket.Items...) - for id, message := range bucket.ItemMap { - items = append(items, &storagepacker.Item{ID: id, Message: message}) + for id, data := range bucket.ItemMap { + items = append(items, &storagepacker.Item{ID: id, Data: data}) } for _, item := range items { entity, err := i.parseEntityFromBucketItem(ctx, item) @@ -581,14 +672,12 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e if persist { // Persist the previous entity object - marshaledPreviousEntity, err := ptypes.MarshalAny(previousEntity) + item, err := i.encodeEntity(previousEntity) if err != nil { return err } - err = i.entityPacker.PutItem(ctx, &storagepacker.Item{ - ID: previousEntity.ID, - Message: marshaledPreviousEntity, - }) + + err = i.entityPacker.PutItem(ctx, item) if err != nil { return err } @@ -602,16 +691,11 @@ func (i *IdentityStore) upsertEntityInTxn(ctx context.Context, txn *memdb.Txn, e } if persist { - entityAsAny, err := ptypes.MarshalAny(entity) + // Persist the entity + item, err := i.encodeEntity(entity) if err != nil { return err } - item := &storagepacker.Item{ - ID: entity.ID, - Message: entityAsAny, - } - - // Persist the entity object err = i.entityPacker.PutItem(ctx, item) if err != nil { return err @@ -1552,16 +1636,10 @@ func (i *IdentityStore) UpsertGroupInTxn(ctx context.Context, txn *memdb.Txn, gr } if persist { - groupAsAny, err := ptypes.MarshalAny(group) + item, err := i.encodeGroup(group) if err != nil { return err } - - item := &storagepacker.Item{ - ID: group.ID, - Message: groupAsAny, - } - sent, err := sendGroupUpgrade(i, group) if err != nil { return err
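Since both encodings coexist during the upgrade window, the read path has to accept either one. A compact sketch of the round trip the identity store now performs around the packer, with the fallback rule spelled out; the entity values are illustrative and the packer itself is elided.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"

	"github.com/hashicorp/vault/helper/identity"
	"github.com/hashicorp/vault/helper/storagepacker"
)

func main() {
	entity := &identity.Entity{ID: "entity-1", Name: "example"} // illustrative values

	// Write side: what encodeEntity produces for PutItem.
	data, err := proto.Marshal(entity)
	if err != nil {
		log.Fatal(err)
	}
	item := &storagepacker.Item{ID: entity.ID, Data: data}

	// Read side: what decodeEntity does with an item fetched via GetItem.
	// Legacy items may still carry Message (an Any); new items carry Data.
	decoded := new(identity.Entity)
	if item.Message != nil {
		log.Fatal("legacy-format item: would fall back to ptypes.UnmarshalAny here")
	}
	if err := proto.Unmarshal(item.Data, decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.ID == entity.ID) // true
}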