Skip to content
Permalink
Browse files

Ensured index cache is best effort, refactored tests, validated edge cases. (#1073)

Fixes #651

Current size also includes slice header.

Signed-off-by: Bartek Plotka <bwplotka@gmail.com>
  • Loading branch information...
bwplotka committed Apr 24, 2019
1 parent eddaf11 commit 7aad30d48ad32216f49884aae96cfced3f18e748
Showing with 622 additions and 316 deletions.
  1. +4 −0 CHANGELOG.md
  2. +18 −10 pkg/store/bucket.go
  3. +5 −1 pkg/store/bucket_e2e_test.go
  4. +0 −216 pkg/store/cache.go
  5. +322 −0 pkg/store/cache/cache.go
  6. +273 −0 pkg/store/cache/cache_test.go
  7. +0 −89 pkg/store/cache_test.go
@@ -19,6 +19,10 @@ We use *breaking* word for marking changes that are not backward compatible (rel

- [#1070](https://github.com/improbable-eng/thanos/pull/1070) Downsampling works back again. Deferred closer errors are now properly captured.

### Changed

- [#1073](https://github.com/improbable-eng/thanos/pull/1073) Store: index cache for requests. It now calculates the size properly (includes slice header), has anti-deadlock safeguard and reports more metrics.

## [v0.4.0-rc.0](https://github.com/improbable-eng/thanos/releases/tag/v0.4.0-rc.0) - 2019.04.18

:warning: **IMPORTANT** :warning: This is the last release that supports gossip. From Thanos v0.5.0, gossip will be completely removed.
@@ -26,6 +26,7 @@ import (
"github.com/improbable-eng/thanos/pkg/objstore"
"github.com/improbable-eng/thanos/pkg/pool"
"github.com/improbable-eng/thanos/pkg/runutil"
storecache "github.com/improbable-eng/thanos/pkg/store/cache"
"github.com/improbable-eng/thanos/pkg/store/storepb"
"github.com/improbable-eng/thanos/pkg/strutil"
"github.com/improbable-eng/thanos/pkg/tracing"
@@ -182,7 +183,7 @@ type BucketStore struct {
metrics *bucketStoreMetrics
bucket objstore.BucketReader
dir string
indexCache *indexCache
indexCache *storecache.IndexCache
chunkPool *pool.BytesPool

// Sets of blocks that have the same labels. They are indexed by a hash over their label set.
@@ -225,10 +226,17 @@ func NewBucketStore(
return nil, errors.Errorf("max concurrency value cannot be lower than 0 (got %v)", maxConcurrent)
}

indexCache, err := newIndexCache(reg, indexCacheSizeBytes)
// TODO(bwplotka): Add as a flag?
maxItemSizeBytes := indexCacheSizeBytes / 2

indexCache, err := storecache.NewIndexCache(logger, reg, storecache.Opts{
MaxSizeBytes: indexCacheSizeBytes,
MaxItemSizeBytes: maxItemSizeBytes,
})
if err != nil {
return nil, errors.Wrap(err, "create index cache")
}

chunkPool, err := pool.NewBytesPool(2e5, 50e6, 2, maxChunkPoolBytes)
if err != nil {
return nil, errors.Wrap(err, "create chunk pool")
@@ -1058,7 +1066,7 @@ type bucketBlock struct {
bucket objstore.BucketReader
meta *metadata.Meta
dir string
indexCache *indexCache
indexCache *storecache.IndexCache
chunkPool *pool.BytesPool

indexVersion int
@@ -1081,7 +1089,7 @@ func newBucketBlock(
bkt objstore.BucketReader,
id ulid.ULID,
dir string,
indexCache *indexCache,
indexCache *storecache.IndexCache,
chunkPool *pool.BytesPool,
p partitioner,
) (b *bucketBlock, err error) {
@@ -1241,13 +1249,13 @@ type bucketIndexReader struct {
block *bucketBlock
dec *index.Decoder
stats *queryStats
cache *indexCache
cache *storecache.IndexCache

mtx sync.Mutex
loadedSeries map[uint64][]byte
}

func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache *indexCache) *bucketIndexReader {
func newBucketIndexReader(ctx context.Context, logger log.Logger, block *bucketBlock, cache *storecache.IndexCache) *bucketIndexReader {
r := &bucketIndexReader{
logger: logger,
ctx: ctx,
@@ -1415,7 +1423,7 @@ func (r *bucketIndexReader) fetchPostings(groups []*postingGroup) error {
for i, g := range groups {
for j, key := range g.keys {
// Get postings for the given key from cache first.
if b, ok := r.cache.postings(r.block.meta.ULID, key); ok {
if b, ok := r.cache.Postings(r.block.meta.ULID, key); ok {
r.stats.postingsTouched++
r.stats.postingsTouchedSizeSum += len(b)

@@ -1487,7 +1495,7 @@ func (r *bucketIndexReader) fetchPostings(groups []*postingGroup) error {

// Return postings and fill LRU cache.
groups[p.groupID].Fill(p.keyID, fetchedPostings)
r.cache.setPostings(r.block.meta.ULID, groups[p.groupID].keys[p.keyID], c)
r.cache.SetPostings(r.block.meta.ULID, groups[p.groupID].keys[p.keyID], c)

// If we just fetched it we still have to update the stats for touched postings.
r.stats.postingsTouched++
@@ -1510,7 +1518,7 @@ func (r *bucketIndexReader) PreloadSeries(ids []uint64) error {
var newIDs []uint64

for _, id := range ids {
if b, ok := r.cache.series(r.block.meta.ULID, id); ok {
if b, ok := r.cache.Series(r.block.meta.ULID, id); ok {
r.loadedSeries[id] = b
continue
}
@@ -1567,7 +1575,7 @@ func (r *bucketIndexReader) loadSeries(ctx context.Context, ids []uint64, start,
}
c = c[n : n+int(l)]
r.loadedSeries[id] = c
r.cache.setSeries(r.block.meta.ULID, id, c)
r.cache.SetSeries(r.block.meta.ULID, id, c)
}
return nil
}
@@ -15,6 +15,7 @@ import (
"github.com/improbable-eng/thanos/pkg/objstore"
"github.com/improbable-eng/thanos/pkg/objstore/objtesting"
"github.com/improbable-eng/thanos/pkg/runutil"
storecache "github.com/improbable-eng/thanos/pkg/store/cache"
"github.com/improbable-eng/thanos/pkg/store/storepb"
"github.com/improbable-eng/thanos/pkg/testutil"
"github.com/pkg/errors"
@@ -310,7 +311,10 @@ func testBucketStore_e2e(t testing.TB, ctx context.Context, s *storeSuite) {
t.Log("Run ", i)

// Always clean cache before each test.
s.store.indexCache, err = newIndexCache(nil, 100)
s.store.indexCache, err = storecache.NewIndexCache(log.NewNopLogger(), nil, storecache.Opts{
MaxSizeBytes: 100,
MaxItemSizeBytes: 100,
})
testutil.Ok(t, err)

srv := newStoreSeriesServer(ctx)

This file was deleted.

Oops, something went wrong.
Oops, something went wrong.

0 comments on commit 7aad30d

Please sign in to comment.
You can’t perform that action at this time.