From 5eca926bc1a9550c1a73ce4a06913a86a3975ce5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 8 May 2020 13:43:33 +0200
Subject: [PATCH 01/25] Added caching for Iter.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go      |  88 +++++++++++++++++++++
 pkg/store/cache/caching_bucket_test.go | 105 ++++++++++++++++++++++---
 2 files changed, 184 insertions(+), 9 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 2ce7e6dd11..0e90860453 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -6,6 +6,7 @@ package storecache
 import (
 	"context"
 	"encoding/binary"
+	"encoding/json"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -14,6 +15,8 @@ import (
 	"time"

 	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/golang/snappy"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
@@ -40,6 +43,9 @@ type CachingBucketConfig struct {
 	// TTLs for various cache items.
 	ChunkObjectSizeTTL time.Duration `yaml:"chunk_object_size_ttl"`
 	ChunkSubrangeTTL   time.Duration `yaml:"chunk_subrange_ttl"`
+
+	// How long to cache result of Iter call.
+	IterTTL time.Duration `yaml:"iter_ttl"`
 }

 func DefaultCachingBucketConfig() CachingBucketConfig {
@@ -48,6 +54,8 @@ func DefaultCachingBucketConfig() CachingBucketConfig {
 		ChunkObjectSizeTTL:        24 * time.Hour,
 		ChunkSubrangeTTL:          24 * time.Hour,
 		MaxChunksGetRangeRequests: 3,
+
+		IterTTL: 5 * time.Minute,
 	}
 }

@@ -67,6 +75,9 @@ type CachingBucket struct {

 	objectSizeRequests prometheus.Counter
 	objectSizeHits     prometheus.Counter
+
+	iterRequests  prometheus.Counter
+	iterCacheHits prometheus.Counter
 }

 func NewCachingBucket(b objstore.Bucket, c cache.Cache, chunks CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
@@ -103,6 +114,15 @@ func NewCachingBucket(b objstore.Bucket, c cache.Cache, chunks CachingBucketConf
 			Name: "thanos_store_bucket_cache_objectsize_hits_total",
 			Help: "Number of object size hits for objects.",
 		}),
+
+		iterRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_iter_requests_total",
+			Help: "Number of Iter requests for directories.",
+		}),
+		iterCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_iter_cache_hits_total",
+			Help: "Number of Iter requests served from cache.",
+		}),
 	}

 	cb.fetchedChunkBytes.WithLabelValues(originBucket)
@@ -132,6 +152,70 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur
 	return cb.WithExpectedErrs(expectedFunc)
 }

+func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error {
+	cb.iterRequests.Inc()
+
+	key := cachingKeyIter(dir)
+
+	data := cb.cache.Fetch(ctx, []string{key})
+	if data[key] != nil {
+		list, err := decodeIterResult(data[key])
+		if err == nil {
+			cb.iterCacheHits.Inc()
+
+			for _, n := range list {
+				err = f(n)
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		} else {
+			// This should not happen.
+			level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "err", err)
+		}
+	}
+
+	// Iteration can take a while (especially since it calls the provided function for each entry),
+	// and IterTTL is generally low. We compute the TTL based on the time when the iteration started.
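+	// For example (illustrative numbers): with the default IterTTL of 5 minutes, an iteration
+	// that itself takes 30 seconds stores its result with a remaining TTL of 4m30s.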
+	iterTime := time.Now()
+	var list []string
+	err := cb.Bucket.Iter(ctx, dir, func(s string) error {
+		list = append(list, s)
+		return f(s)
+	})
+
+	remainingTTL := cb.config.IterTTL - time.Since(iterTime)
+	if err == nil && remainingTTL > 0 {
+		data := encodeIterResult(list)
+		if data != nil {
+			cb.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL)
+		}
+	}
+	return err
+}
+
+// Iter results should compress nicely, especially in subdirectories.
+func encodeIterResult(files []string) []byte {
+	data, err := json.Marshal(files)
+	if err != nil {
+		return nil
+	}
+
+	return snappy.Encode(nil, data)
+}
+
+func decodeIterResult(data []byte) ([]string, error) {
+	decoded, err := snappy.Decode(nil, data)
+	if err != nil {
+		return nil, err
+	}
+
+	var list []string
+	err = json.Unmarshal(decoded, &list)
+	return list, err
+}
+
 var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)

 func isTSDBChunkFile(name string) bool {
@@ -344,6 +428,10 @@ func cachingKeyObjectSubrange(name string, start int64, end int64) string {
 	return fmt.Sprintf("subrange:%s:%d:%d", name, start, end)
 }

+func cachingKeyIter(name string) string {
+	return fmt.Sprintf("iter:%s", name)
+}
+
 // Reader implementation that uses in-memory subranges.
 type subrangesReader struct {
 	subrangeSize int64
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index 2bbc37c887..698fdb1f5a 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -9,6 +9,8 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"sort"
+	"strings"
 	"sync"
 	"testing"
 	"time"
@@ -20,7 +22,7 @@ import (
 	"github.com/thanos-io/thanos/pkg/testutil"
 )

-func TestCachingBucket(t *testing.T) {
+func TestChunksCaching(t *testing.T) {
 	length := int64(1024 * 1024)
 	subrangeSize := int64(16000) // All tests are based on this value.

@@ -35,7 +37,7 @@
 	testutil.Ok(t, inmem.Upload(context.Background(), name, bytes.NewReader(data)))

 	// We reuse cache between tests (!)
-	cache := &mockCache{cache: make(map[string][]byte)}
+	cache := newMockCache()

 	// Warning, these tests must be run in order, they depend on cache state from the previous test.
 	for _, tc := range []struct {
@@ -106,7 +108,7 @@
 			expectedFetchedBytes: length,
 			expectedCachedBytes:  0, // Cache is flushed.
 			init: func() {
-				cache.cache = map[string][]byte{} // Flush cache.
+				cache.flush()
 			},
 		},

@@ -249,16 +251,29 @@ func verifyGetRange(t *testing.T, cachingBucket *CachingBucket, name string, off
 	}
 }

+type cacheItem struct {
+	data []byte
+	exp  time.Time
+}
+
 type mockCache struct {
 	mu    sync.Mutex
-	cache map[string][]byte
+	cache map[string]cacheItem
 }

-func (m *mockCache) Store(_ context.Context, data map[string][]byte, _ time.Duration) {
+func newMockCache() *mockCache {
+	c := &mockCache{}
+	c.flush()
+	return c
+}
+
+func (m *mockCache) Store(_ context.Context, data map[string][]byte, ttl time.Duration) {
 	m.mu.Lock()
 	defer m.mu.Unlock()
+
+	exp := time.Now().Add(ttl)
 	for key, val := range data {
-		m.cache[key] = val
+		m.cache[key] = cacheItem{data: val, exp: exp}
 	}
 }

@@ -268,16 +283,21 @@ func (m *mockCache) Fetch(_ context.Context, keys []string) map[string][]byte {

 	found := make(map[string][]byte, len(keys))

+	now := time.Now()
 	for _, k := range keys {
 		v, ok := m.cache[k]
-		if ok {
-			found[k] = v
+		if ok && now.Before(v.exp) {
+			found[k] = v.data
 		}
 	}

 	return found
 }

+func (m *mockCache) flush() {
+	m.cache = map[string]cacheItem{}
+}
+
 func TestMergeRanges(t *testing.T) {
 	for ix, tc := range []struct {
 		input []rng
@@ -315,7 +335,7 @@ func TestMergeRanges(t *testing.T) {

 func TestInvalidOffsetAndLength(t *testing.T) {
 	b := &testBucket{objstore.NewInMemBucket()}
-	c, err := NewCachingBucket(b, &mockCache{cache: make(map[string][]byte)}, DefaultCachingBucketConfig(), nil, nil)
+	c, err := NewCachingBucket(b, newMockCache(), DefaultCachingBucketConfig(), nil, nil)
 	testutil.Ok(t, err)

 	r, err := c.GetRange(context.Background(), "test", -1, 1000)
@@ -342,3 +362,70 @@ func (b *testBucket) GetRange(ctx context.Context, name string, off, length int6

 	return b.InMemBucket.GetRange(ctx, name, off, length)
 }
+
+func TestCachedIter(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+	testutil.Ok(t, inmem.Upload(context.Background(), "/file-1", strings.NewReader("hej")))
+	testutil.Ok(t, inmem.Upload(context.Background(), "/file-2", strings.NewReader("ahoj")))
+	testutil.Ok(t, inmem.Upload(context.Background(), "/file-3", strings.NewReader("hello")))
+	testutil.Ok(t, inmem.Upload(context.Background(), "/file-4", strings.NewReader("ciao")))
+
+	allFiles := []string{"/file-1", "/file-2", "/file-3", "/file-4"}
+
+	// We reuse cache between tests (!)
+	cache := newMockCache()
+
+	config := DefaultCachingBucketConfig()
+
+	cb, err := NewCachingBucket(inmem, cache, config, nil, nil)
+	testutil.Ok(t, err)
+
+	testIter(t, cb, allFiles, false)
+
+	testutil.Ok(t, inmem.Upload(context.Background(), "/file-5", strings.NewReader("nazdar")))
+	testIter(t, cb, allFiles, true) // Iter returns old response.
+
+	cache.flush()
+	allFiles = append(allFiles, "/file-5")
+	testIter(t, cb, allFiles, false)
+
+	cache.flush()
+
+	e := errors.Errorf("test error")
+
+	// This iteration returns an error. Result will not be cached.
+	testutil.Equals(t, e, cb.Iter(context.Background(), "/", func(_ string) error {
+		return e
+	}))
+
+	// Nothing cached now.
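+	// (The failed iteration above stored nothing, so this call goes to the bucket again.)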
+	testIter(t, cb, allFiles, false)
+}
+
+func testIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool) {
+	hitsBefore := int(promtest.ToFloat64(cb.iterCacheHits))
+
+	col := iterCollector{}
+	testutil.Ok(t, cb.Iter(context.Background(), "/", col.collect))
+
+	hitsAfter := int(promtest.ToFloat64(cb.iterCacheHits))
+
+	sort.Strings(col.items)
+	testutil.Equals(t, expectedFiles, col.items)
+
+	expectedHitsDiff := 0
+	if expectedCache {
+		expectedHitsDiff = 1
+	}
+
+	testutil.Equals(t, expectedHitsDiff, hitsAfter-hitsBefore)
+}
+
+type iterCollector struct {
+	items []string
+}
+
+func (it *iterCollector) collect(s string) error {
+	it.items = append(it.items, s)
+	return nil
+}

From b41de2c6a9f33af0f8f00e7632ecf9c696067fa8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 8 May 2020 14:06:09 +0200
Subject: [PATCH 02/25] Added cache for Exists call for meta-files.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go      | 87 ++++++++++++++++++++++++++
 pkg/store/cache/caching_bucket_test.go | 71 +++++++++++++++++++--
 2 files changed, 153 insertions(+), 5 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 0e90860453..7db2936d18 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -11,6 +11,7 @@ import (
 	"io"
 	"io/ioutil"
 	"regexp"
+	"strings"
 	"sync"
 	"time"

@@ -22,6 +23,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"golang.org/x/sync/errgroup"

+	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/cache"
 	"github.com/thanos-io/thanos/pkg/objstore"
 	"github.com/thanos-io/thanos/pkg/runutil"
@@ -31,6 +33,12 @@ import (
 const (
 	originCache  = "cache"
 	originBucket = "bucket"
+
+	existsTrue  = "true"
+	existsFalse = "false"
+
+	metaFilenameSuffix         = "/" + metadata.MetaFilename
+	deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename
 )

 type CachingBucketConfig struct {
 	// How long to cache result of Iter call.
 	IterTTL time.Duration `yaml:"iter_ttl"`
+
+	// Meta files (meta.json and deletion mark file) caching config.
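+	// Illustrative YAML using these field names (values shown are the defaults set below):
+	//   metafile_caching_enabled: true
+	//   metafile_exists_ttl: 10m
+	//   metafile_doesnt_exist_ttl: 3m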
+	MetaFilesCachingEnabled bool          `yaml:"metafile_caching_enabled"`
+	MetaFileExistsTTL       time.Duration `yaml:"metafile_exists_ttl"`
+	MetaFileDoesntExistTTL  time.Duration `yaml:"metafile_doesnt_exist_ttl"`
 }

 func DefaultCachingBucketConfig() CachingBucketConfig {
 		MaxChunksGetRangeRequests: 3,

 		IterTTL: 5 * time.Minute,
+
+		MetaFilesCachingEnabled: true,
+		MetaFileExistsTTL:       10 * time.Minute,
+		MetaFileDoesntExistTTL:  3 * time.Minute,
 	}
 }

 type CachingBucket struct {
 	iterRequests  prometheus.Counter
 	iterCacheHits prometheus.Counter
+
+	metaFileExistsRequests  prometheus.Counter
+	metaFileExistsCacheHits prometheus.Counter
 }

 func NewCachingBucket(b objstore.Bucket, c cache.Cache, chunks CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
 			Name: "thanos_store_bucket_cache_iter_cache_hits_total",
 			Help: "Number of Iter requests served from cache.",
 		}),
+
+		metaFileExistsRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_metafile_exists_requests_total",
+			Help: "Number of Exists requests for meta-files.",
+		}),
+		metaFileExistsCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_metafile_exists_cache_hits_total",
+			Help: "Number of Exists requests for meta-files served from cache.",
+		}),
 	}

 	cb.fetchedChunkBytes.WithLabelValues(originBucket)
@@ -216,6 +245,60 @@ func decodeIterResult(data []byte) ([]string, error) {
 	return list, err
 }

+func isMetaFile(name string) bool {
+	return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix)
+}
+
+func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) {
+	if cb.config.MetaFilesCachingEnabled && isMetaFile(name) {
+		cb.metaFileExistsRequests.Inc()
+
+		key := cachingKeyExists(name)
+		hits := cb.cache.Fetch(ctx, []string{key})
+
+		if ex := hits[key]; ex != nil {
+			switch string(ex) {
+			case existsTrue:
+				cb.metaFileExistsCacheHits.Inc()
+				return true, nil
+			case existsFalse:
+				cb.metaFileExistsCacheHits.Inc()
+				return false, nil
+			default:
+				level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex))
+			}
+		}
+
+		existsTime := time.Now()
+		ok, err := cb.Bucket.Exists(ctx, name)
+		if err == nil {
+			cb.storeExistsCacheEntry(ctx, key, ok, existsTime)
+		}
+
+		return ok, err
+	}
+
+	return cb.Bucket.Exists(ctx, name)
+}
+
+func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time) {
+	var (
+		data []byte
+		ttl  time.Duration
+	)
+	if exists {
+		ttl = cb.config.MetaFileExistsTTL - time.Since(ts)
+		data = []byte(existsTrue)
+	} else {
+		ttl = cb.config.MetaFileDoesntExistTTL - time.Since(ts)
+		data = []byte(existsFalse)
+	}
+
+	if ttl > 0 {
+		cb.cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl)
+	}
+}
+
 var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)

 func isTSDBChunkFile(name string) bool {
@@ -432,6 +515,10 @@ func cachingKeyIter(name string) string {
 	return fmt.Sprintf("iter:%s", name)
 }

+func cachingKeyExists(name string) string {
+	return fmt.Sprintf("exists:%s", name)
+}
+
 // Reader implementation that uses in-memory subranges.
 type subrangesReader struct {
 	subrangeSize int64
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index 698fdb1f5a..6d06ddd060 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -380,14 +380,14 @@ func TestCachedIter(t *testing.T) {
 	cb, err := NewCachingBucket(inmem, cache, config, nil, nil)
 	testutil.Ok(t, err)

-	testIter(t, cb, allFiles, false)
+	verifyIter(t, cb, allFiles, false)

 	testutil.Ok(t, inmem.Upload(context.Background(), "/file-5", strings.NewReader("nazdar")))
-	testIter(t, cb, allFiles, true) // Iter returns old response.
+	verifyIter(t, cb, allFiles, true) // Iter returns old response.

 	cache.flush()
 	allFiles = append(allFiles, "/file-5")
-	testIter(t, cb, allFiles, false)
+	verifyIter(t, cb, allFiles, false)

 	cache.flush()

@@ -399,10 +399,10 @@
 	}))

 	// Nothing cached now.
-	testIter(t, cb, allFiles, false)
+	verifyIter(t, cb, allFiles, false)
 }

-func testIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool) {
+func verifyIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool) {
 	hitsBefore := int(promtest.ToFloat64(cb.iterCacheHits))

 	col := iterCollector{}
@@ -429,3 +429,64 @@ func (it *iterCollector) collect(s string) error {
 	it.items = append(it.items, s)
 	return nil
 }
+
+func TestExists(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+
+	// We reuse cache between tests (!)
+	cache := newMockCache()
+
+	config := DefaultCachingBucketConfig()
+
+	cb, err := NewCachingBucket(inmem, cache, config, nil, nil)
+	testutil.Ok(t, err)
+
+	file := "/block123" + metaFilenameSuffix
+	verifyExists(t, cb, file, false, false)
+
+	testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej")))
+	verifyExists(t, cb, file, false, true) // Reused cache result.
+	cache.flush()
+	verifyExists(t, cb, file, true, false)
+
+	testutil.Ok(t, inmem.Delete(context.Background(), file))
+	verifyExists(t, cb, file, true, true) // Reused cache result.
+	cache.flush()
+	verifyExists(t, cb, file, false, false)
+}
+
+func TestExistsCachingDisabled(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+
+	// We reuse cache between tests (!)
+	cache := newMockCache()
+
+	config := DefaultCachingBucketConfig()
+	config.MetaFilesCachingEnabled = false
+
+	cb, err := NewCachingBucket(inmem, cache, config, nil, nil)
+	testutil.Ok(t, err)
+
+	file := "/block123" + metaFilenameSuffix
+	verifyExists(t, cb, file, false, false)
+
+	testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej")))
+	verifyExists(t, cb, file, true, false)
+
+	testutil.Ok(t, inmem.Delete(context.Background(), file))
+	verifyExists(t, cb, file, false, false)
+}
+
+func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool) {
+	hitsBefore := int(promtest.ToFloat64(cb.metaFileExistsCacheHits))
+	ok, err := cb.Exists(context.Background(), file)
+	testutil.Ok(t, err)
+	testutil.Equals(t, exists, ok)
+	hitsAfter := int(promtest.ToFloat64(cb.metaFileExistsCacheHits))
+
+	if fromCache {
+		testutil.Equals(t, 1, hitsAfter-hitsBefore)
+	} else {
+		testutil.Equals(t, 0, hitsAfter-hitsBefore)
+	}
+}

From 49b90d1620f41560cbee630ecebae2a86019b5ae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 8 May 2020 14:29:57 +0200
Subject: [PATCH 03/25] Added cache for reading block metadata files.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go      | 78 ++++++++++++++++++++++
 pkg/store/cache/caching_bucket_test.go | 59 +++++++++++++++++
 2 files changed, 137 insertions(+)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 7db2936d18..92c44f8ea8 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -4,6 +4,7 @@
 package storecache

 import (
+	"bytes"
 	"context"
 	"encoding/binary"
 	"encoding/json"
@@ -41,6 +42,8 @@ const (
 	deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename
 )

+var errObjNotFound = errors.Errorf("object not found")
+
 type CachingBucketConfig struct {
 	// Meta files (meta.json and deletion mark file) caching config.
 	MetaFilesCachingEnabled bool          `yaml:"metafile_caching_enabled"`
 	MetaFileExistsTTL       time.Duration `yaml:"metafile_exists_ttl"`
 	MetaFileDoesntExistTTL  time.Duration `yaml:"metafile_doesnt_exist_ttl"`
+	MetaFileContentTTL      time.Duration `yaml:"metafile_content_ttl"`
 }

 func DefaultCachingBucketConfig() CachingBucketConfig {
 		MetaFilesCachingEnabled: true,
 		MetaFileExistsTTL:       10 * time.Minute,
 		MetaFileDoesntExistTTL:  3 * time.Minute,
+		MetaFileContentTTL:      1 * time.Hour,
 	}
 }

 type CachingBucket struct {
 	metaFileExistsRequests  prometheus.Counter
 	metaFileExistsCacheHits prometheus.Counter
+
+	metaFileGetRequests             prometheus.Counter
+	metaFileGetCacheHits            prometheus.Counter
+	metaFileGetDoesntExistCacheHits prometheus.Counter
 }

 func NewCachingBucket(b objstore.Bucket, c cache.Cache, chunks CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
 			Name: "thanos_store_bucket_cache_metafile_exists_cache_hits_total",
 			Help: "Number of Exists requests for meta-files served from cache.",
 		}),
+
+		metaFileGetRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_metafile_get_requests_total",
+			Help: "Number of Get requests for meta-files.",
+		}),
+		metaFileGetCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_metafile_get_hits_total",
+			Help: "Number of Get requests for meta-files served from cache with data.",
+		}),
+		metaFileGetDoesntExistCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_metafile_get_doesnt_exist_hits_total",
+			Help: "Number of Get requests for meta-files served from cache with object not found error.",
+		}),
 	}

 	cb.fetchedChunkBytes.WithLabelValues(originBucket)
@@ -299,6 +321,58 @@ func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey s
 	}
 }

+func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
+	if cb.config.MetaFilesCachingEnabled && isMetaFile(name) {
+		cb.metaFileGetRequests.Inc()
+
+		key := cachingKeyContent(name)
+		existsKey := cachingKeyExists(name)
+
+		hits := cb.cache.Fetch(ctx, []string{key, existsKey})
+		if hits[key] != nil {
+			cb.metaFileGetCacheHits.Inc()
+			return ioutil.NopCloser(bytes.NewReader(hits[key])), nil
+		}
+
+		// If we know that file doesn't exist, we can return that. Useful for deletion marks.
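+		// (For example, a repeated Get for a block's deletion mark that previously returned
+		// "not found" is answered from cache without another bucket request.)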
+		if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse {
+			cb.metaFileGetDoesntExistCacheHits.Inc()
+			return nil, errObjNotFound
+		}
+
+		getTime := time.Now()
+		reader, err := cb.Bucket.Get(ctx, name)
+		if err != nil {
+			if cb.Bucket.IsObjNotFoundErr(err) {
+				// Cache that object doesn't exist.
+				cb.storeExistsCacheEntry(ctx, existsKey, false, getTime)
+			}
+
+			return nil, err
+		}
+		defer runutil.CloseWithLogOnErr(cb.logger, reader, "CachingBucket.Get(%q)", name)
+
+		data, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return nil, err
+		}
+
+		ttl := cb.config.MetaFileContentTTL - time.Since(getTime)
+		if ttl > 0 {
+			cb.cache.Store(ctx, map[string][]byte{key: data}, ttl)
+		}
+		cb.storeExistsCacheEntry(ctx, existsKey, true, getTime)
+
+		return ioutil.NopCloser(bytes.NewReader(data)), nil
+	}
+
+	return cb.Bucket.Get(ctx, name)
+}
+
+func (cb *CachingBucket) IsObjNotFoundErr(err error) bool {
+	return err == errObjNotFound || cb.Bucket.IsObjNotFoundErr(err)
+}
+
 var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)

 func isTSDBChunkFile(name string) bool {
@@ -519,6 +593,10 @@ func cachingKeyExists(name string) string {
 	return fmt.Sprintf("exists:%s", name)
 }

+func cachingKeyContent(name string) string {
+	return fmt.Sprintf("content:%s", name)
+}
+
 // Reader implementation that uses in-memory subranges.
 type subrangesReader struct {
 	subrangeSize int64
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index 6d06ddd060..637cc06923 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -19,6 +19,7 @@ import (
 	promtest "github.com/prometheus/client_golang/prometheus/testutil"

 	"github.com/thanos-io/thanos/pkg/objstore"
+	"github.com/thanos-io/thanos/pkg/runutil"
 	"github.com/thanos-io/thanos/pkg/testutil"
 )

@@ -490,3 +491,61 @@ func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fro
 		testutil.Equals(t, 0, hitsAfter-hitsBefore)
 	}
 }
+
+func TestGetMetafile(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+
+	// We reuse cache between tests (!)
+	cache := newMockCache()
+
+	config := DefaultCachingBucketConfig()
+
+	cb, err := NewCachingBucket(inmem, cache, config, nil, nil)
+	testutil.Ok(t, err)
+
+	file := "/block123" + metaFilenameSuffix
+	verifyGet(t, cb, file, nil, false)
+
+	data := []byte("hello world")
+	testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data)))
+
+	// Even if file is now uploaded, the cached "doesn't exist" result is still served.
+	verifyGet(t, cb, file, nil, true)
+	verifyExists(t, cb, file, false, true)
+
+	cache.flush()
+
+	verifyGet(t, cb, file, data, false)
+	verifyGet(t, cb, file, data, true)
+	verifyExists(t, cb, file, true, true)
+}
+
+func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool) {
+	hitsBefore := int(promtest.ToFloat64(cb.metaFileGetCacheHits))
+	hitsDoesntExistsBefore := int(promtest.ToFloat64(cb.metaFileGetDoesntExistCacheHits))
+
+	r, err := cb.Get(context.Background(), file)
+	if expectedData == nil {
+		testutil.Assert(t, cb.IsObjNotFoundErr(err))
+
+		hitsDoesntExistsAfter := int(promtest.ToFloat64(cb.metaFileGetDoesntExistCacheHits))
+		if cacheUsed {
+			testutil.Equals(t, 1, hitsDoesntExistsAfter-hitsDoesntExistsBefore)
+		} else {
+			testutil.Equals(t, 0, hitsDoesntExistsAfter-hitsDoesntExistsBefore)
+		}
+	} else {
+		testutil.Ok(t, err)
+		runutil.CloseWithLogOnErr(nil, r, "verifyGet")
+		data, err := ioutil.ReadAll(r)
+		testutil.Ok(t, err)
+		testutil.Equals(t, expectedData, data)
+
+		hitsAfter := int(promtest.ToFloat64(cb.metaFileGetCacheHits))
+		if cacheUsed {
+			testutil.Equals(t, 1, hitsAfter-hitsBefore)
+		} else {
+			testutil.Equals(t, 0, hitsAfter-hitsBefore)
+		}
+	}
+}

From eee5af29d6a8d5ee22e3e4e60caede28e8795473 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 8 May 2020 22:07:30 +0200
Subject: [PATCH 04/25] Make caching bucket configurable with different caches
 for different types of objects.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go         | 438 ++++++++++++----------
 pkg/store/cache/caching_bucket_factory.go |  26 +-
 pkg/store/cache/caching_bucket_test.go    |   4 +-
 3 files changed, 264 insertions(+), 204 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 92c44f8ea8..fded0dff6c 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -11,8 +11,6 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
-	"regexp"
-	"strings"
 	"sync"
 	"time"

@@ -24,7 +22,6 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"golang.org/x/sync/errgroup"

-	"github.com/thanos-io/thanos/pkg/block/metadata"
 	"github.com/thanos-io/thanos/pkg/cache"
 	"github.com/thanos-io/thanos/pkg/objstore"
 	"github.com/thanos-io/thanos/pkg/runutil"
@@ -32,52 +29,49 @@ import (
 const (
+	config = "config"
+
 	originCache  = "cache"
 	originBucket = "bucket"

 	existsTrue  = "true"
 	existsFalse = "false"
-
-	metaFilenameSuffix         = "/" + metadata.MetaFilename
-	deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename
 )

 var errObjNotFound = errors.Errorf("object not found")

 type CachingBucketConfig struct {
 	// Basic unit used to cache chunks.
-	ChunkSubrangeSize int64 `yaml:"chunk_subrange_size"`
+	SubrangeSize int64 `yaml:"chunk_subrange_size"`

 	// Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited.
-	MaxChunksGetRangeRequests int `yaml:"max_chunks_get_range_requests"`
+	MaxGetRangeRequests int `yaml:"max_chunks_get_range_requests"`

 	// TTLs for various cache items.
-	ChunkObjectSizeTTL time.Duration `yaml:"chunk_object_size_ttl"`
-	ChunkSubrangeTTL   time.Duration `yaml:"chunk_subrange_ttl"`
+	ObjectSizeTTL time.Duration `yaml:"chunk_object_size_ttl"`
+	SubrangeTTL   time.Duration `yaml:"chunk_subrange_ttl"`

 	// How long to cache result of Iter call.
 	IterTTL time.Duration `yaml:"iter_ttl"`

 	// Meta files (meta.json and deletion mark file) caching config.
-	MetaFilesCachingEnabled bool          `yaml:"metafile_caching_enabled"`
-	MetaFileExistsTTL       time.Duration `yaml:"metafile_exists_ttl"`
-	MetaFileDoesntExistTTL  time.Duration `yaml:"metafile_doesnt_exist_ttl"`
-	MetaFileContentTTL      time.Duration `yaml:"metafile_content_ttl"`
+	MetaFileExistsTTL      time.Duration `yaml:"metafile_exists_ttl"`
+	MetaFileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"`
+	MetaFileContentTTL     time.Duration `yaml:"metafile_content_ttl"`
 }

 func DefaultCachingBucketConfig() CachingBucketConfig {
 	return CachingBucketConfig{
-		ChunkSubrangeSize:         16000, // Equal to max chunk size.
-		ChunkObjectSizeTTL:        24 * time.Hour,
-		ChunkSubrangeTTL:          24 * time.Hour,
-		MaxChunksGetRangeRequests: 3,
+		SubrangeSize:        16000, // Equal to max chunk size.
+		ObjectSizeTTL:       24 * time.Hour,
+		SubrangeTTL:         24 * time.Hour,
+		MaxGetRangeRequests: 3,

 		IterTTL: 5 * time.Minute,

-		MetaFilesCachingEnabled: true,
-		MetaFileExistsTTL:       10 * time.Minute,
-		MetaFileDoesntExistTTL:  3 * time.Minute,
-		MetaFileContentTTL:      1 * time.Hour,
+		MetaFileExistsTTL:      10 * time.Minute,
+		MetaFileDoesntExistTTL: 3 * time.Minute,
+		MetaFileContentTTL:     1 * time.Hour,
 	}
 }

 type CachingBucket struct {
 	objstore.Bucket

-	cache cache.Cache
+	logger log.Logger

-	config CachingBucketConfig
+	getRangeConfigs        map[string]*cacheConfig
+	requestedGetRangeBytes *prometheus.CounterVec
+	fetchedGetRangeBytes   *prometheus.CounterVec
+	refetchedGetRangeBytes *prometheus.CounterVec

-	logger log.Logger
+	objectSizeRequests *prometheus.CounterVec
+	objectSizeHits     *prometheus.CounterVec

-	requestedChunkBytes prometheus.Counter
-	fetchedChunkBytes   *prometheus.CounterVec
-	refetchedChunkBytes *prometheus.CounterVec
+	iterConfigs   map[string]*cacheConfig
+	iterRequests  *prometheus.CounterVec
+	iterCacheHits *prometheus.CounterVec

-	objectSizeRequests prometheus.Counter
-	objectSizeHits     prometheus.Counter
+	existsConfigs   map[string]*cacheConfig
+	existsRequests  *prometheus.CounterVec
+	existsCacheHits *prometheus.CounterVec

-	iterRequests  prometheus.Counter
-	iterCacheHits prometheus.Counter
+	getConfigs   map[string]*cacheConfig
+	getRequests  *prometheus.CounterVec
+	getCacheHits *prometheus.CounterVec
+}

-	metaFileExistsRequests  prometheus.Counter
-	metaFileExistsCacheHits prometheus.Counter
+type cacheConfig struct {
+	CachingBucketConfig

-	metaFileGetRequests             prometheus.Counter
-	metaFileGetCacheHits            prometheus.Counter
-	metaFileGetDoesntExistCacheHits prometheus.Counter
+	matcher func(name string) bool
+	cache   cache.Cache
 }

-func NewCachingBucket(b objstore.Bucket, c cache.Cache, chunks CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
+func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) {
 	if b == nil {
 		return nil, errors.New("bucket is nil")
 	}
-	if c == nil {
-		return nil, errors.New("cache is nil")
-	}

 	cb := &CachingBucket{
 		Bucket: b,
-		config: chunks,
-		cache:  c,
 		logger: logger,

-		requestedChunkBytes: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_requested_chunk_bytes_total",
-			Help: "Total number of requested bytes for chunk data.",
-		}),
-		fetchedChunkBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_fetched_chunk_bytes_total",
-			Help: "Total number of fetched chunk bytes. Data from bucket is then stored to cache.",
-		}, []string{"origin"}),
-		refetchedChunkBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+		requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_getrange_requested_bytes_total",
+			Help: "Total number of bytes requested via GetRange.",
+		}, []string{config}),
+		fetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_getrange_fetched_bytes_total",
+			Help: "Total number of bytes fetched because of GetRange operation. Data from bucket is then stored to cache.",
+		}, []string{"origin", config}),
+		refetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_store_bucket_cache_refetched_chunk_bytes_total",
-			Help: "Total number of chunk bytes re-fetched from storage, despite being in cache already.",
-		}, []string{"origin"}),
-		objectSizeRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+			Help: "Total number of bytes re-fetched from storage because of GetRange operation, despite being in cache already.",
+		}, []string{"origin", config}),
+
+		objectSizeRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_store_bucket_cache_objectsize_requests_total",
 			Help: "Number of object size requests for objects.",
-		}),
-		objectSizeHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+		}, []string{config}),
+		objectSizeHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_store_bucket_cache_objectsize_hits_total",
 			Help: "Number of object size hits for objects.",
-		}),
+		}, []string{config}),

-		iterRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+		iterRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_store_bucket_cache_iter_requests_total",
 			Help: "Number of Iter requests for directories.",
-		}),
-		iterCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+		}, []string{config}),
+		iterCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
 			Name: "thanos_store_bucket_cache_iter_cache_hits_total",
 			Help: "Number of Iter requests served from cache.",
-		}),
-
-		metaFileExistsRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_metafile_exists_requests_total",
-			Help: "Number of Exists requests for meta-files.",
-		}),
-		metaFileExistsCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_metafile_exists_cache_hits_total",
-			Help: "Number of Exists requests for meta-files served from cache.",
-		}),
-
-		metaFileGetRequests: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_metafile_get_requests_total",
-			Help: "Number of Get requests for meta-files.",
-		}),
-		metaFileGetCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_metafile_get_hits_total",
-			Help: "Number of Get requests for meta-files served from cache with data.",
-		}),
-		metaFileGetDoesntExistCacheHits: promauto.With(reg).NewCounter(prometheus.CounterOpts{
-			Name: "thanos_store_bucket_cache_metafile_get_doesnt_exist_hits_total",
-			Help: "Number of Get requests for meta-files served from cache with object not found error.",
-		}),
-	}
-
-	cb.fetchedChunkBytes.WithLabelValues(originBucket)
-	cb.fetchedChunkBytes.WithLabelValues(originCache)
-	cb.refetchedChunkBytes.WithLabelValues(originCache)
+		}, []string{config}),
+
+		existsRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_exists_requests_total",
+			Help: "Number of Exists requests.",
+		}, []string{config}),
+		existsCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_exists_cache_hits_total",
+			Help: "Number of Exists requests served from cache.",
+		}, []string{config}),
+
+		getRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_get_requests_total",
+			Help: "Number of Get requests.",
+		}, []string{config}),
+		getCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+			Name: "thanos_store_bucket_cache_get_hits_total",
+			Help: "Number of Get requests served from cache with data.",
+		}, []string{config}),
+	}

 	return cb, nil
 }

+func newCacheConfig(cfg CachingBucketConfig, matcher func(string) bool, cache cache.Cache) *cacheConfig {
+	if matcher == nil {
+		panic("matcher")
+	}
+	if cache == nil {
+		panic("cache")
+	}
+
+	return &cacheConfig{
+		CachingBucketConfig: cfg,
+		matcher:             matcher,
+		cache:               cache,
+	}
+}
+
+func (cb *CachingBucket) CacheIter(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) {
+	cb.iterConfigs[labelName] = newCacheConfig(cfg, matcher, cache)
+	cb.iterRequests.WithLabelValues(labelName)
+	cb.iterCacheHits.WithLabelValues(labelName)
+}
+
+func (cb *CachingBucket) CacheExists(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) {
+	cb.existsConfigs[labelName] = newCacheConfig(cfg, matcher, cache)
+	cb.existsRequests.WithLabelValues(labelName)
+	cb.existsCacheHits.WithLabelValues(labelName)
+}
+
+func (cb *CachingBucket) CacheGet(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) {
+	cb.getConfigs[labelName] = newCacheConfig(cfg, matcher, cache)
+	cb.getRequests.WithLabelValues(labelName)
+	cb.getCacheHits.WithLabelValues(labelName)
+}
+
+func (cb *CachingBucket) CacheGetRange(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) {
+	cb.getRangeConfigs[labelName] = newCacheConfig(cfg, matcher, cache)
+	cb.requestedGetRangeBytes.WithLabelValues(labelName)
+	cb.fetchedGetRangeBytes.WithLabelValues(originCache, labelName)
+	cb.fetchedGetRangeBytes.WithLabelValues(originBucket, labelName)
+	cb.refetchedGetRangeBytes.WithLabelValues(originCache, labelName)
+}
+
+func (cb *CachingBucket) findCacheConfig(configs map[string]*cacheConfig, objectName string) (string, *cacheConfig) {
+	for n, cfg := range configs {
+		if cfg.matcher(objectName) {
+			return n, cfg
+		}
+	}
+	return "", nil
+}
+
 func (cb *CachingBucket) Name() string {
 	return "caching: " + cb.Bucket.Name()
 }
@@ -204,15 +242,20 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur
 }

 func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error {
-	cb.iterRequests.Inc()
+	lname, cfg := cb.findCacheConfig(cb.iterConfigs, dir)
+	if cfg == nil {
+		return cb.Bucket.Iter(ctx, dir, f)
+	}
+
+	cb.iterRequests.WithLabelValues(lname).Inc()

 	key := cachingKeyIter(dir)

-	data := cb.cache.Fetch(ctx, []string{key})
+	data := cfg.cache.Fetch(ctx, []string{key})
 	if data[key] != nil {
 		list, err := decodeIterResult(data[key])
 		if err == nil {
-			cb.iterCacheHits.Inc()
+			cb.iterCacheHits.WithLabelValues(lname).Inc()

 			for _, n := range list {
 				err = f(n)
 				return f(s)
 	})

-	remainingTTL := cb.config.IterTTL - time.Since(iterTime)
+	remainingTTL := cfg.IterTTL - time.Since(iterTime)
 	if err == nil && remainingTTL > 0 {
 		data := encodeIterResult(list)
 		if data != nil {
-			cb.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL)
+			cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL)
 		}
 	}
 	return err
@@ -267,126 +310,119 @@ func decodeIterResult(data []byte) ([]string, error) {
 	return list, err
 }

-func isMetaFile(name string) bool {
-	return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix)
-}
-
 func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) {
-	if cb.config.MetaFilesCachingEnabled && isMetaFile(name) {
-		cb.metaFileExistsRequests.Inc()
-
-		key := cachingKeyExists(name)
-		hits := cb.cache.Fetch(ctx, []string{key})
-
-		if ex := hits[key]; ex != nil {
-			switch string(ex) {
-			case existsTrue:
-				cb.metaFileExistsCacheHits.Inc()
-				return true, nil
-			case existsFalse:
-				cb.metaFileExistsCacheHits.Inc()
-				return false, nil
-			default:
-				level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex))
-			}
-		}
-
-		existsTime := time.Now()
-		ok, err := cb.Bucket.Exists(ctx, name)
-		if err == nil {
-			cb.storeExistsCacheEntry(ctx, key, ok, existsTime)
+	lname, cfg := cb.findCacheConfig(cb.existsConfigs, name)
+	if cfg == nil {
+		return cb.Bucket.Exists(ctx, name)
+	}
+
+	cb.existsRequests.WithLabelValues(lname).Inc()
+
+	key := cachingKeyExists(name)
+	hits := cfg.cache.Fetch(ctx, []string{key})
+
+	if ex := hits[key]; ex != nil {
+		switch string(ex) {
+		case existsTrue:
+			cb.existsCacheHits.WithLabelValues(lname).Inc()
+			return true, nil
+		case existsFalse:
+			cb.existsCacheHits.WithLabelValues(lname).Inc()
+			return false, nil
+		default:
+			level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex))
 		}
+	}

-		return ok, err
+	existsTime := time.Now()
+	ok, err := cb.Bucket.Exists(ctx, name)
+	if err == nil {
+		cb.storeExistsCacheEntry(ctx, key, ok, existsTime, cfg)
 	}

-	return cb.Bucket.Exists(ctx, name)
+	return ok, err
 }

-func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time) {
+func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cfg *cacheConfig) {
 	var (
 		data []byte
 		ttl  time.Duration
 	)
 	if exists {
-		ttl = cb.config.MetaFileExistsTTL - time.Since(ts)
+		ttl = cfg.MetaFileExistsTTL - time.Since(ts)
 		data = []byte(existsTrue)
 	} else {
-		ttl = cb.config.MetaFileDoesntExistTTL - time.Since(ts)
+		ttl = cfg.MetaFileDoesntExistTTL - time.Since(ts)
 		data = []byte(existsFalse)
 	}

 	if ttl > 0 {
-		cb.cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl)
+		cfg.cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl)
 	}
 }

 func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
-	if cb.config.MetaFilesCachingEnabled && isMetaFile(name) {
-		cb.metaFileGetRequests.Inc()
+	lname, cfg := cb.findCacheConfig(cb.getConfigs, name)
+	if cfg == nil {
+		return cb.Bucket.Get(ctx, name)
+	}

-		key := cachingKeyContent(name)
-		existsKey := cachingKeyExists(name)
+	cb.getRequests.WithLabelValues(lname).Inc()

-		hits := cb.cache.Fetch(ctx, []string{key, existsKey})
-		if hits[key] != nil {
-			cb.metaFileGetCacheHits.Inc()
-			return ioutil.NopCloser(bytes.NewReader(hits[key])), nil
-		}
+	key := cachingKeyContent(name)
+	existsKey := cachingKeyExists(name)

-		// If we know that file doesn't exist, we can return that. Useful for deletion marks.
-		if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse {
-			cb.metaFileGetDoesntExistCacheHits.Inc()
-			return nil, errObjNotFound
-		}
+	hits := cfg.cache.Fetch(ctx, []string{key, existsKey})
+	if hits[key] != nil {
+		cb.getCacheHits.WithLabelValues(lname).Inc()
+		return ioutil.NopCloser(bytes.NewReader(hits[key])), nil
+	}

-		getTime := time.Now()
-		reader, err := cb.Bucket.Get(ctx, name)
-		if err != nil {
-			if cb.Bucket.IsObjNotFoundErr(err) {
-				// Cache that object doesn't exist.
-				cb.storeExistsCacheEntry(ctx, existsKey, false, getTime)
-			}
+	// If we know that file doesn't exist, we can return that. Useful for deletion marks.
+	if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse {
+		cb.getCacheHits.WithLabelValues(lname).Inc()
+		return nil, errObjNotFound
+	}

-			return nil, err
+	getTime := time.Now()
+	reader, err := cb.Bucket.Get(ctx, name)
+	if err != nil {
+		if cb.Bucket.IsObjNotFoundErr(err) {
+			// Cache that object doesn't exist.
+			cb.storeExistsCacheEntry(ctx, existsKey, false, getTime, cfg)
 		}
-		defer runutil.CloseWithLogOnErr(cb.logger, reader, "CachingBucket.Get(%q)", name)

-		data, err := ioutil.ReadAll(reader)
-		if err != nil {
-			return nil, err
-		}
+		return nil, err
+	}
+	defer runutil.CloseWithLogOnErr(cb.logger, reader, "CachingBucket.Get(%q)", name)

-		ttl := cb.config.MetaFileContentTTL - time.Since(getTime)
-		if ttl > 0 {
-			cb.cache.Store(ctx, map[string][]byte{key: data}, ttl)
-		}
-		cb.storeExistsCacheEntry(ctx, existsKey, true, getTime)
+	data, err := ioutil.ReadAll(reader)
+	if err != nil {
+		return nil, err
+	}

-		return ioutil.NopCloser(bytes.NewReader(data)), nil
+	ttl := cfg.MetaFileContentTTL - time.Since(getTime)
+	if ttl > 0 {
+		cfg.cache.Store(ctx, map[string][]byte{key: data}, ttl)
 	}
+	cb.storeExistsCacheEntry(ctx, existsKey, true, getTime, cfg)

-	return cb.Bucket.Get(ctx, name)
+	return ioutil.NopCloser(bytes.NewReader(data)), nil
 }

 func (cb *CachingBucket) IsObjNotFoundErr(err error) bool {
 	return err == errObjNotFound || cb.Bucket.IsObjNotFoundErr(err)
 }

-var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)
-
-func isTSDBChunkFile(name string) bool {
-	return chunksMatcher.MatchString(name)
-}
-
 func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
-	if isTSDBChunkFile(name) && off >= 0 && length > 0 {
+	lname, cfg := cb.findCacheConfig(cb.getRangeConfigs, name)
+	if off >= 0 && length > 0 && cfg != nil {
 		var (
 			r   io.ReadCloser
 			err error
 		)
-		tracing.DoInSpan(ctx, "cachingbucket_getrange_chunkfile", func(ctx context.Context) {
-			r, err = cb.getRangeChunkFile(ctx, name, off, length)
+		tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) {
+			r, err = cb.cachedGetRange(ctx, name, off, length, lname, cfg)
 		})
 		return r, err
 	}
@@ -394,14 +430,14 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length
 	return cb.Bucket.GetRange(ctx, name, off, length)
 }

-func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, ttl time.Duration) (uint64, error) {
+func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, labelName string, cache cache.Cache, ttl time.Duration) (uint64, error) {
 	key := cachingKeyObjectSize(name)

-	cb.objectSizeRequests.Add(1)
+	cb.objectSizeRequests.WithLabelValues(labelName).Add(1)

-	hits := cb.cache.Fetch(ctx, []string{key})
+	hits := cache.Fetch(ctx, []string{key})
 	if s := hits[key]; len(s) == 8 {
-		cb.objectSizeHits.Add(1)
+		cb.objectSizeHits.WithLabelValues(labelName).Add(1)
 		return binary.BigEndian.Uint64(s), nil
 	}

@@ -412,15 +448,15 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, ttl

 	var buf [8]byte
 	binary.BigEndian.PutUint64(buf[:], size)
-	cb.cache.Store(ctx, map[string][]byte{key: buf[:]}, ttl)
+	cache.Store(ctx, map[string][]byte{key: buf[:]}, ttl)

 	return size, nil
 }

-func (cb *CachingBucket) getRangeChunkFile(ctx context.Context, name string, offset, length int64) (io.ReadCloser, error) {
-	cb.requestedChunkBytes.Add(float64(length))
+func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, lname string, cfg *cacheConfig) (io.ReadCloser, error) {
+	cb.requestedGetRangeBytes.WithLabelValues(lname).Add(float64(length))

-	size, err := cb.cachedObjectSize(ctx, name, cb.config.ChunkObjectSizeTTL)
+	size, err := cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL)
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to get size of chunk file: %s", name)
 	}
@@ -431,27 +467,27 @@ func (cb *CachingBucket) getRangeChunkFile(ctx context.Context, name string, off
 	}

 	// Start and end range are subrange-aligned offsets into object, that we're going to read.
-	startRange := (offset / cb.config.ChunkSubrangeSize) * cb.config.ChunkSubrangeSize
-	endRange := ((offset + length) / cb.config.ChunkSubrangeSize) * cb.config.ChunkSubrangeSize
-	if (offset+length)%cb.config.ChunkSubrangeSize > 0 {
-		endRange += cb.config.ChunkSubrangeSize
+	startRange := (offset / cfg.SubrangeSize) * cfg.SubrangeSize
+	endRange := ((offset + length) / cfg.SubrangeSize) * cfg.SubrangeSize
+	if (offset+length)%cfg.SubrangeSize > 0 {
+		endRange += cfg.SubrangeSize
 	}

 	// The very last subrange in the object may have length that is not divisible by subrange size.
-	lastSubrangeOffset := endRange - cb.config.ChunkSubrangeSize
-	lastSubrangeLength := int(cb.config.ChunkSubrangeSize)
+	lastSubrangeOffset := endRange - cfg.SubrangeSize
+	lastSubrangeLength := int(cfg.SubrangeSize)
 	if uint64(endRange) > size {
-		lastSubrangeOffset = (int64(size) / cb.config.ChunkSubrangeSize) * cb.config.ChunkSubrangeSize
+		lastSubrangeOffset = (int64(size) / cfg.SubrangeSize) * cfg.SubrangeSize
 		lastSubrangeLength = int(int64(size) - lastSubrangeOffset)
 	}

-	numSubranges := (endRange - startRange) / cb.config.ChunkSubrangeSize
+	numSubranges := (endRange - startRange) / cfg.SubrangeSize

 	offsetKeys := make(map[int64]string, numSubranges)
 	keys := make([]string, 0, numSubranges)

-	for off := startRange; off < endRange; off += cb.config.ChunkSubrangeSize {
-		end := off + cb.config.ChunkSubrangeSize
+	for off := startRange; off < endRange; off += cfg.SubrangeSize {
+		end := off + cfg.SubrangeSize
 		if end > int64(size) {
 			end = int64(size)
 		}
 	}

 	// Try to get all subranges from the cache.
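 	// For example (hypothetical request): with SubrangeSize=16000, offset=10000 and length=20000
 	// cover the aligned range [0, 32000), i.e. two subrange cache keys.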
-	hits := cb.cache.Fetch(ctx, keys)
+	hits := cfg.cache.Fetch(ctx, keys)
 	for _, b := range hits {
-		cb.fetchedChunkBytes.WithLabelValues(originCache).Add(float64(len(b)))
+		cb.fetchedGetRangeBytes.WithLabelValues(originCache, lname).Add(float64(len(b)))
 	}

 	if len(hits) < len(keys) {
@@ -472,13 +508,13 @@ func (cb *CachingBucket) getRangeChunkFile(ctx context.Context, name string, off
 		hits = map[string][]byte{}
 	}

-		err := cb.fetchMissingChunkSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength)
+		err := cb.fetchMissingChunkSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, lname, cfg)
 		if err != nil {
 			return nil, err
 		}
 	}

-	return ioutil.NopCloser(newSubrangesReader(cb.config.ChunkSubrangeSize, offsetKeys, hits, offset, length)), nil
+	return ioutil.NopCloser(newSubrangesReader(cfg.SubrangeSize, offsetKeys, hits, offset, length)), nil
 }

 type rng struct {
@@ -487,19 +523,19 @@ type rng struct {

 // fetchMissingChunkSubranges fetches missing subranges, stores them into "hits" map
 // and into cache as well (using provided cacheKeys).
-func (cb *CachingBucket) fetchMissingChunkSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int) error {
+func (cb *CachingBucket) fetchMissingChunkSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, labelName string, cfg *cacheConfig) error {
 	// Ordered list of missing sub-ranges.
 	var missing []rng

-	for off := startRange; off < endRange; off += cb.config.ChunkSubrangeSize {
+	for off := startRange; off < endRange; off += cfg.SubrangeSize {
 		if hits[cacheKeys[off]] == nil {
-			missing = append(missing, rng{start: off, end: off + cb.config.ChunkSubrangeSize})
+			missing = append(missing, rng{start: off, end: off + cfg.SubrangeSize})
 		}
 	}

 	missing = mergeRanges(missing, 0) // Merge adjacent ranges.
 	// Keep merging until we have only max number of ranges (= requests).
-	for limit := cb.config.ChunkSubrangeSize; cb.config.MaxChunksGetRangeRequests > 0 && len(missing) > cb.config.MaxChunksGetRangeRequests; limit = limit * 2 {
+	for limit := cfg.SubrangeSize; cfg.MaxGetRangeRequests > 0 && len(missing) > cfg.MaxGetRangeRequests; limit = limit * 2 {
 		missing = mergeRanges(missing, limit)
 	}

@@ -516,7 +552,7 @@ func (cb *CachingBucket) fetchMissingChunkSubranges(ctx context.Context, name st
 			}
 			defer runutil.CloseWithLogOnErr(cb.logger, r, "fetching range [%d, %d]", m.start, m.end)

-			for off := m.start; off < m.end && gctx.Err() == nil; off += cb.config.ChunkSubrangeSize {
+			for off := m.start; off < m.end && gctx.Err() == nil; off += cfg.SubrangeSize {
 				key := cacheKeys[off]
 				if key == "" {
 					return errors.Errorf("fetching range [%d, %d]: caching key for offset %d not found", m.start, m.end, off)
 				// if object length isn't divisible by subrange size.
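 				// (For example, a 1,000,000-byte object with 16,000-byte subranges ends with a
 				// final subrange of only 8,000 bytes.)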
 					subrangeData = make([]byte, lastSubrangeLength)
 				} else {
-					subrangeData = make([]byte, cb.config.ChunkSubrangeSize)
+					subrangeData = make([]byte, cfg.SubrangeSize)
 				}
 				_, err := io.ReadFull(r, subrangeData)
 				if err != nil {
@@ -545,10 +581,10 @@
 				hitsMutex.Unlock()

 				if storeToCache {
-					cb.fetchedChunkBytes.WithLabelValues(originBucket).Add(float64(len(subrangeData)))
-					cb.cache.Store(gctx, map[string][]byte{key: subrangeData}, cb.config.ChunkSubrangeTTL)
+					cb.fetchedGetRangeBytes.WithLabelValues(originBucket, labelName).Add(float64(len(subrangeData)))
+					cfg.cache.Store(gctx, map[string][]byte{key: subrangeData}, cfg.SubrangeTTL)
 				} else {
-					cb.refetchedChunkBytes.WithLabelValues(originCache).Add(float64(len(subrangeData)))
+					cb.refetchedGetRangeBytes.WithLabelValues(originCache, labelName).Add(float64(len(subrangeData)))
 				}
 			}

diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go
index 0bc0a78458..f2a61e90a6 100644
--- a/pkg/store/cache/caching_bucket_factory.go
+++ b/pkg/store/cache/caching_bucket_factory.go
@@ -4,12 +4,16 @@
 package storecache

 import (
+	"regexp"
+	"strings"
+
 	"github.com/go-kit/kit/log"
 	"github.com/go-kit/kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"gopkg.in/yaml.v2"

+	"github.com/thanos-io/thanos/pkg/block/metadata"
 	cache "github.com/thanos-io/thanos/pkg/cache"
 	"github.com/thanos-io/thanos/pkg/cacheutil"
 	"github.com/thanos-io/thanos/pkg/objstore"
@@ -60,5 +64,25 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger
 		return nil, errors.Errorf("unsupported cache type: %s", config.Type)
 	}

-	return NewCachingBucket(bucket, c, config.CachingBucketConfig, logger, reg)
+	cb, err := NewCachingBucket(bucket, logger, reg)
+	if err != nil {
+		return nil, err
+	}
+
+	// Configure cache.
+	metaFilenameSuffix := "/" + metadata.MetaFilename
+	deletionMarkFilenameSuffix := "/" + metadata.DeletionMarkFilename
+
+	var isMetaFile = func(name string) bool {
+		return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix)
+	}
+
+	chunksMatcher := regexp.MustCompile(`^.*/chunks/\d+$`)
+
+	cb.CacheGetRange("chunks", c, func(name string) bool { return chunksMatcher.MatchString(name) }, config.CachingBucketConfig)
+	cb.CacheExists("metafile", c, isMetaFile, config.CachingBucketConfig)
+	cb.CacheGet("metafile", c, isMetaFile, config.CachingBucketConfig)
+	cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.CachingBucketConfig) // Cache Iter requests for root.
+
+	return cb, nil
 }
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index 637cc06923..119b197607 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -221,8 +221,8 @@ func TestChunksCaching(t *testing.T) {
 			}

 			cfg := DefaultCachingBucketConfig()
-			cfg.ChunkSubrangeSize = subrangeSize
-			cfg.MaxChunksGetRangeRequests = tc.maxGetRangeRequests
+			cfg.SubrangeSize = subrangeSize
+			cfg.MaxGetRangeRequests = tc.maxGetRangeRequests

 			cachingBucket, err := NewCachingBucket(inmem, cache, cfg, nil, nil)
 			testutil.Ok(t, err)

From ce023d6ff496eb3283f38f03cc2e1eade6af5816 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 8 May 2020 22:24:33 +0200
Subject: [PATCH 05/25] Fixed tests.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go         | 37 +++++-----
 pkg/store/cache/caching_bucket_factory.go | 22 +++---
 pkg/store/cache/caching_bucket_test.go    | 82 ++++++++++++-----------
 3 files changed, 76 insertions(+), 65 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index fded0dff6c..8e1be788fc 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -45,19 +45,19 @@ type CachingBucketConfig struct {
 	SubrangeSize int64 `yaml:"chunk_subrange_size"`

 	// Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited.
-	MaxGetRangeRequests int `yaml:"max_chunks_get_range_requests"`
+	MaxGetRangeRequests int           `yaml:"max_chunks_get_range_requests"`
+	SubrangeTTL         time.Duration `yaml:"subrange_ttl"`

 	// TTLs for various cache items.
-	ObjectSizeTTL time.Duration `yaml:"chunk_object_size_ttl"`
-	SubrangeTTL   time.Duration `yaml:"chunk_subrange_ttl"`
+	ObjectSizeTTL time.Duration `yaml:"object_size_ttl"`

 	// How long to cache result of Iter call.
 	IterTTL time.Duration `yaml:"iter_ttl"`

-	// Meta files (meta.json and deletion mark file) caching config.
-	MetaFileExistsTTL      time.Duration `yaml:"metafile_exists_ttl"`
-	MetaFileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"`
-	MetaFileContentTTL     time.Duration `yaml:"metafile_content_ttl"`
+	// Config for Exists and Get operations.
+	ExistsTTL      time.Duration `yaml:"exists_ttl"`
+	DoesntExistTTL time.Duration `yaml:"doesnt_exist_ttl"`
+	ContentTTL     time.Duration `yaml:"content_ttl"`
 }

 func DefaultCachingBucketConfig() CachingBucketConfig {
@@ -69,9 +69,9 @@ func DefaultCachingBucketConfig() CachingBucketConfig {

 		IterTTL: 5 * time.Minute,

-		MetaFileExistsTTL:      10 * time.Minute,
-		MetaFileDoesntExistTTL: 3 * time.Minute,
-		MetaFileContentTTL:     1 * time.Hour,
+		ExistsTTL:      10 * time.Minute,
+		DoesntExistTTL: 3 * time.Minute,
+		ContentTTL:     1 * time.Hour,
 	}
 }

@@ -109,14 +109,21 @@ type cacheConfig struct {
 	cache cache.Cache
 }

+// NewCachingBucket creates caching bucket with no configuration. Various "Cache*" methods configure
+// this bucket to cache results of individual bucket methods. Configuration must be set before
+// caching bucket is used by other objects.
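+// A minimal wiring sketch (names illustrative, mirroring the factory below):
+//
+//	cb, _ := NewCachingBucket(bkt, logger, reg)
+//	cb.CacheGet("metafile", c, isMetaFile, cfg)
+//	cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, cfg)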
func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) { if b == nil { return nil, errors.New("bucket is nil") } cb := &CachingBucket{ - Bucket: b, - logger: logger, + Bucket: b, + logger: logger, + getConfigs: map[string]*cacheConfig{}, + getRangeConfigs: map[string]*cacheConfig{}, + existsConfigs: map[string]*cacheConfig{}, + iterConfigs: map[string]*cacheConfig{}, requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_requested_bytes_total", @@ -349,10 +356,10 @@ func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey s ttl time.Duration ) if exists { - ttl = cfg.MetaFileExistsTTL - time.Since(ts) + ttl = cfg.ExistsTTL - time.Since(ts) data = []byte(existsTrue) } else { - ttl = cfg.MetaFileDoesntExistTTL - time.Since(ts) + ttl = cfg.DoesntExistTTL - time.Since(ts) data = []byte(existsFalse) } @@ -401,7 +408,7 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e return nil, err } - ttl := cfg.MetaFileContentTTL - time.Since(getTime) + ttl := cfg.ContentTTL - time.Since(getTime) if ttl > 0 { cfg.cache.Store(ctx, map[string][]byte{key: data}, ttl) } diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index f2a61e90a6..2fa17302b4 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -24,6 +24,9 @@ type BucketCacheProvider string const ( MemcachedBucketCacheProvider BucketCacheProvider = "memcached" // Memcached cache-provider for caching bucket. + + metaFilenameSuffix = "/" + metadata.MetaFilename + deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename ) // CachingBucketWithBackendConfig is a configuration of caching bucket used by Store component. @@ -70,19 +73,18 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger } // Configure cache. - metaFilenameSuffix := "/" + metadata.MetaFilename - deletionMarkFilenameSuffix := "/" + metadata.DeletionMarkFilename - - var isMetaFile = func(name string) bool { - return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix) - } - - chunksMatcher := regexp.MustCompile(`^.*/chunks/\d+$`) - - cb.CacheGetRange("chunks", c, func(name string) bool { return chunksMatcher.MatchString(name) }, config.CachingBucketConfig) + cb.CacheGetRange("chunks", c, isTSDBChunkFile, config.CachingBucketConfig) cb.CacheExists("metafile", c, isMetaFile, config.CachingBucketConfig) cb.CacheGet("metafile", c, isMetaFile, config.CachingBucketConfig) cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.CachingBucketConfig) // Cache Iter requests for root. 
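Each Cache* call above registers a matcher together with a cache and a config for a single bucket operation; at request time the object name is tested against the registered matchers and only a match is served through the cache. For example, "01ABC/chunks/000123" is picked up by the chunks regexp, while "01ABC/meta.json" lands on the "metafile" Get and Exists entries.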
return cb, nil } + +var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`) + +func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) } + +func isMetaFile(name string) bool { + return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix) +} diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 119b197607..1d1ae28c1a 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -224,20 +224,19 @@ func TestChunksCaching(t *testing.T) { cfg.SubrangeSize = subrangeSize cfg.MaxGetRangeRequests = tc.maxGetRangeRequests - cachingBucket, err := NewCachingBucket(inmem, cache, cfg, nil, nil) + cachingBucket, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) + cachingBucket.CacheGetRange("chunks", cache, isTSDBChunkFile, cfg) verifyGetRange(t, cachingBucket, name, tc.offset, tc.length, tc.expectedLength) - testutil.Equals(t, tc.expectedCachedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedChunkBytes.WithLabelValues(originCache)))) - testutil.Equals(t, tc.expectedFetchedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedChunkBytes.WithLabelValues(originBucket)))) - testutil.Equals(t, tc.expectedRefetchedBytes, int64(promtest.ToFloat64(cachingBucket.refetchedChunkBytes.WithLabelValues(originCache)))) + testutil.Equals(t, tc.expectedCachedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedGetRangeBytes.WithLabelValues(originCache, "chunks")))) + testutil.Equals(t, tc.expectedFetchedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedGetRangeBytes.WithLabelValues(originBucket, "chunks")))) + testutil.Equals(t, tc.expectedRefetchedBytes, int64(promtest.ToFloat64(cachingBucket.refetchedGetRangeBytes.WithLabelValues(originCache, "chunks")))) }) } } func verifyGetRange(t *testing.T, cachingBucket *CachingBucket, name string, offset, length int64, expectedLength int64) { - t.Helper() - r, err := cachingBucket.GetRange(context.Background(), name, offset, length) testutil.Ok(t, err) @@ -336,8 +335,9 @@ func TestMergeRanges(t *testing.T) { func TestInvalidOffsetAndLength(t *testing.T) { b := &testBucket{objstore.NewInMemBucket()} - c, err := NewCachingBucket(b, newMockCache(), DefaultCachingBucketConfig(), nil, nil) + c, err := NewCachingBucket(b, nil, nil) testutil.Ok(t, err) + c.CacheGetRange("chunks", newMockCache(), func(string) bool { return true }, DefaultCachingBucketConfig()) r, err := c.GetRange(context.Background(), "test", -1, 1000) testutil.Equals(t, nil, r) @@ -378,8 +378,9 @@ func TestCachedIter(t *testing.T) { config := DefaultCachingBucketConfig() - cb, err := NewCachingBucket(inmem, cache, config, nil, nil) + cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) + cb.CacheIter("dirs", cache, func(string) bool { return true }, config) verifyIter(t, cb, allFiles, false) @@ -439,21 +440,22 @@ func TestExists(t *testing.T) { config := DefaultCachingBucketConfig() - cb, err := NewCachingBucket(inmem, cache, config, nil, nil) + cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) + cb.CacheExists("test", cache, matchAll, config) file := "/block123" + metaFilenameSuffix - verifyExists(t, cb, file, false, false) + verifyExists(t, cb, file, false, false, "test") testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) - verifyExists(t, cb, file, false, true) // Reused cache result. + verifyExists(t, cb, file, false, true, "test") // Reused cache result. 
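(The upload above is deliberately not visible yet: the first Exists call cached "false", and that entry keeps answering until DoesntExistTTL elapses or the cache is flushed, which is exactly what the following steps exercise.)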
cache.flush() - verifyExists(t, cb, file, true, false) + verifyExists(t, cb, file, true, false, "test") testutil.Ok(t, inmem.Delete(context.Background(), file)) - verifyExists(t, cb, file, true, true) // Reused cache result. + verifyExists(t, cb, file, true, true, "test") // Reused cache result. cache.flush() - verifyExists(t, cb, file, false, false) + verifyExists(t, cb, file, false, false, "test") } func TestExistsCachingDisabled(t *testing.T) { @@ -462,28 +464,27 @@ func TestExistsCachingDisabled(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - config := DefaultCachingBucketConfig() - config.MetaFilesCachingEnabled = false - - cb, err := NewCachingBucket(inmem, cache, config, nil, nil) + cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) + cb.CacheExists("test", cache, func(string) bool { return false }, DefaultCachingBucketConfig()) file := "/block123" + metaFilenameSuffix - verifyExists(t, cb, file, false, false) + verifyExists(t, cb, file, false, false, "test") testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) - verifyExists(t, cb, file, true, false) + verifyExists(t, cb, file, true, false, "test") testutil.Ok(t, inmem.Delete(context.Background(), file)) - verifyExists(t, cb, file, false, false) + verifyExists(t, cb, file, false, false, "test") } -func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool) { - hitsBefore := int(promtest.ToFloat64(cb.metaFileExistsCacheHits)) +func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, label string) { + t.Helper() + hitsBefore := int(promtest.ToFloat64(cb.existsCacheHits.WithLabelValues(label))) ok, err := cb.Exists(context.Background(), file) testutil.Ok(t, err) testutil.Equals(t, exists, ok) - hitsAfter := int(promtest.ToFloat64(cb.metaFileExistsCacheHits)) + hitsAfter := int(promtest.ToFloat64(cb.existsCacheHits.WithLabelValues(label))) if fromCache { testutil.Equals(t, 1, hitsAfter-hitsBefore) @@ -498,41 +499,40 @@ func TestGetMetafile(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - config := DefaultCachingBucketConfig() - - cb, err := NewCachingBucket(inmem, cache, config, nil, nil) + cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) + cb.CacheGet("metafile", cache, matchAll, DefaultCachingBucketConfig()) + cb.CacheExists("metafile", cache, matchAll, DefaultCachingBucketConfig()) file := "/block123" + metaFilenameSuffix - verifyGet(t, cb, file, nil, false) + verifyGet(t, cb, file, nil, false, "test") data := []byte("hello world") testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data))) // Even if file is now uploaded, old data is served from cache. 
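(Get caches its negative lookup under the same key that Exists uses, so after the miss above both Get and Exists keep reporting the object as absent until the cache is flushed.)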
- verifyGet(t, cb, file, nil, true) - verifyExists(t, cb, file, false, true) + verifyGet(t, cb, file, nil, true, "metafile") + verifyExists(t, cb, file, false, true, "metafile") cache.flush() - verifyGet(t, cb, file, data, false) - verifyGet(t, cb, file, data, true) - verifyExists(t, cb, file, true, true) + verifyGet(t, cb, file, data, false, "metafile") + verifyGet(t, cb, file, data, true, "metafile") + verifyExists(t, cb, file, true, true, "metafile") } -func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool) { - hitsBefore := int(promtest.ToFloat64(cb.metaFileGetCacheHits)) - hitsDoesntExistsBefore := int(promtest.ToFloat64(cb.metaFileGetDoesntExistCacheHits)) +func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, label string) { + hitsBefore := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) r, err := cb.Get(context.Background(), file) if expectedData == nil { testutil.Assert(t, cb.IsObjNotFoundErr(err)) - hitsDoesntExistsAfter := int(promtest.ToFloat64(cb.metaFileGetDoesntExistCacheHits)) + hitsAfter := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) if cacheUsed { - testutil.Equals(t, 1, hitsDoesntExistsAfter-hitsDoesntExistsBefore) + testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { - testutil.Equals(t, 0, hitsDoesntExistsAfter-hitsDoesntExistsBefore) + testutil.Equals(t, 0, hitsAfter-hitsBefore) } } else { testutil.Ok(t, err) @@ -541,7 +541,7 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte testutil.Ok(t, err) testutil.Equals(t, expectedData, data) - hitsAfter := int(promtest.ToFloat64(cb.metaFileGetCacheHits)) + hitsAfter := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -549,3 +549,5 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte } } } + +func matchAll(string) bool { return true } From 9877d2b290e76e2a77b1b478b33a133d347f15cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Fri, 8 May 2020 22:52:06 +0200 Subject: [PATCH 06/25] Added caching for ObjectSize. Enabled caching of index. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 90 ++++++++++++++--------- pkg/store/cache/caching_bucket_factory.go | 12 ++- pkg/store/cache/caching_bucket_test.go | 52 +++++++++++-- 3 files changed, 114 insertions(+), 40 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 8e1be788fc..a4e031d404 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -41,11 +41,11 @@ const ( var errObjNotFound = errors.Errorf("object not found") type CachingBucketConfig struct { - // Basic unit used to cache chunks. - SubrangeSize int64 `yaml:"chunk_subrange_size"` + // Basic unit used to cache subranges in GetRange method. + SubrangeSize int64 `yaml:"subrange_size"` // Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited. - MaxGetRangeRequests int `yaml:"max_chunks_get_range_requests"` + MaxGetRangeRequests int `yaml:"max_get_range_requests"` SubrangeTTL time.Duration `yaml:"subrange_ttl"` // TTLs for various cache items. 
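With the chunk-specific names gone, caller-side configuration reads generically. A sketch, with illustrative values only:

	cfg := DefaultCachingBucketConfig()
	cfg.SubrangeSize = 16000    // caching unit for GetRange subranges
	cfg.MaxGetRangeRequests = 3 // cap on bucket round trips per GetRange call
	cfg.IterTTL = 5 * time.Minute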
@@ -81,28 +81,29 @@ type CachingBucket struct { logger log.Logger - getRangeConfigs map[string]*cacheConfig + getRangeConfigs map[string]*operationConfig requestedGetRangeBytes *prometheus.CounterVec fetchedGetRangeBytes *prometheus.CounterVec refetchedGetRangeBytes *prometheus.CounterVec + objectSizeConfigs map[string]*operationConfig objectSizeRequests *prometheus.CounterVec objectSizeHits *prometheus.CounterVec - iterConfigs map[string]*cacheConfig + iterConfigs map[string]*operationConfig iterRequests *prometheus.CounterVec iterCacheHits *prometheus.CounterVec - existsConfigs map[string]*cacheConfig + existsConfigs map[string]*operationConfig existsRequests *prometheus.CounterVec existsCacheHits *prometheus.CounterVec - getConfigs map[string]*cacheConfig + getConfigs map[string]*operationConfig getRequests *prometheus.CounterVec getCacheHits *prometheus.CounterVec } -type cacheConfig struct { +type operationConfig struct { CachingBucketConfig matcher func(name string) bool @@ -118,12 +119,14 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis } cb := &CachingBucket{ - Bucket: b, - logger: logger, - getConfigs: map[string]*cacheConfig{}, - getRangeConfigs: map[string]*cacheConfig{}, - existsConfigs: map[string]*cacheConfig{}, - iterConfigs: map[string]*cacheConfig{}, + Bucket: b, + logger: logger, + + getConfigs: map[string]*operationConfig{}, + getRangeConfigs: map[string]*operationConfig{}, + existsConfigs: map[string]*operationConfig{}, + iterConfigs: map[string]*operationConfig{}, + objectSizeConfigs: map[string]*operationConfig{}, requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_requested_bytes_total", @@ -134,7 +137,7 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis Help: "Total number of bytes fetched because of GetRange operation. 
Data from bucket is then stored to cache.", }, []string{"origin", config}), refetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_refetched_chunk_bytes_total", + Name: "thanos_store_bucket_cache_getrange_refetched_bytes_total", Help: "Total number of bytes re-fetched from storage because of GetRange operation, despite being in cache already.", }, []string{"origin", config}), @@ -178,7 +181,7 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis return cb, nil } -func newCacheConfig(cfg CachingBucketConfig, matcher func(string) bool, cache cache.Cache) *cacheConfig { +func newCacheConfig(cfg CachingBucketConfig, matcher func(string) bool, cache cache.Cache) *operationConfig { if matcher == nil { panic("matcher") } @@ -186,7 +189,7 @@ func newCacheConfig(cfg CachingBucketConfig, matcher func(string) bool, cache ca panic("cache") } - return &cacheConfig{ + return &operationConfig{ CachingBucketConfig: cfg, matcher: matcher, cache: cache, @@ -219,7 +222,13 @@ func (cb *CachingBucket) CacheGetRange(labelName string, cache cache.Cache, matc cb.refetchedGetRangeBytes.WithLabelValues(originCache, labelName) } -func (cb *CachingBucket) findCacheConfig(configs map[string]*cacheConfig, objectName string) (string, *cacheConfig) { +func (cb *CachingBucket) CacheObjectSize(labelName string, cache cache.Cache, matcher func(name string) bool, cfg CachingBucketConfig) { + cb.objectSizeConfigs[labelName] = newCacheConfig(cfg, matcher, cache) + cb.objectSizeRequests.WithLabelValues(labelName) + cb.objectSizeHits.WithLabelValues(labelName) +} + +func (cb *CachingBucket) findCacheConfig(configs map[string]*operationConfig, objectName string) (string, *operationConfig) { for n, cfg := range configs { if cfg.matcher(objectName) { return n, cfg @@ -350,7 +359,7 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) return ok, err } -func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cfg *cacheConfig) { +func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cfg *operationConfig) { var ( data []byte ttl time.Duration @@ -422,19 +431,32 @@ func (cb *CachingBucket) IsObjNotFoundErr(err error) bool { } func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { + if off < 0 || length <= 0 { + return cb.Bucket.GetRange(ctx, name, off, length) + } + lname, cfg := cb.findCacheConfig(cb.getRangeConfigs, name) - if off >= 0 && length > 0 && cfg != nil { - var ( - r io.ReadCloser - err error - ) - tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) { - r, err = cb.cachedGetRange(ctx, name, off, length, lname, cfg) - }) - return r, err + if cfg == nil { + return cb.Bucket.GetRange(ctx, name, off, length) + } + + var ( + r io.ReadCloser + err error + ) + tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) { + r, err = cb.cachedGetRange(ctx, name, off, length, lname, cfg) + }) + return r, err +} + +func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { + lname, cfg := cb.findCacheConfig(cb.objectSizeConfigs, name) + if cfg == nil { + return cb.Bucket.ObjectSize(ctx, name) } - return cb.Bucket.GetRange(ctx, name, off, length) + return cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL) } func (cb *CachingBucket) cachedObjectSize(ctx 
context.Context, name string, labelName string, cache cache.Cache, ttl time.Duration) (uint64, error) { @@ -460,12 +482,12 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, labe return size, nil } -func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, lname string, cfg *cacheConfig) (io.ReadCloser, error) { +func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, lname string, cfg *operationConfig) (io.ReadCloser, error) { cb.requestedGetRangeBytes.WithLabelValues(lname).Add(float64(length)) size, err := cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL) if err != nil { - return nil, errors.Wrapf(err, "failed to get size of chunk file: %s", name) + return nil, errors.Wrapf(err, "failed to get size of object: %s", name) } // If length goes over object size, adjust length. We use it later to limit number of read bytes. @@ -515,7 +537,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset hits = map[string][]byte{} } - err := cb.fetchMissingChunkSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, lname, cfg) + err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, lname, cfg) if err != nil { return nil, err } @@ -528,9 +550,9 @@ type rng struct { start, end int64 } -// fetchMissingChunkSubranges fetches missing subranges, stores them into "hits" map +// fetchMissingSubranges fetches missing subranges, stores them into "hits" map // and into cache as well (using provided cacheKeys). -func (cb *CachingBucket) fetchMissingChunkSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, labelName string, cfg *cacheConfig) error { +func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, labelName string, cfg *operationConfig) error { // Ordered list of missing sub-ranges. var missing []rng diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index 2fa17302b4..4e1fdbb6b9 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -76,7 +76,13 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger cb.CacheGetRange("chunks", c, isTSDBChunkFile, config.CachingBucketConfig) cb.CacheExists("metafile", c, isMetaFile, config.CachingBucketConfig) cb.CacheGet("metafile", c, isMetaFile, config.CachingBucketConfig) - cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.CachingBucketConfig) // Cache Iter requests for root. + + // Cache Iter requests for root. + cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.CachingBucketConfig) + + // Enabling index caching. 
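(Index files are read through many small GetRange calls, so caching the object size plus fixed-size subranges, as the two calls below do, lets repeated index reads be served from cache.)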
+ cb.CacheObjectSize("index", c, isIndexFile, config.CachingBucketConfig) + cb.CacheGetRange("index", c, isIndexFile, config.CachingBucketConfig) return cb, nil } @@ -88,3 +94,7 @@ func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) func isMetaFile(name string) bool { return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix) } + +func isIndexFile(name string) bool { + return strings.HasSuffix(name, "/index") +} diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 1d1ae28c1a..077966a1d2 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -444,7 +444,7 @@ func TestExists(t *testing.T) { testutil.Ok(t, err) cb.CacheExists("test", cache, matchAll, config) - file := "/block123" + metaFilenameSuffix + file := "/random_object" verifyExists(t, cb, file, false, false, "test") testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) @@ -468,7 +468,7 @@ func TestExistsCachingDisabled(t *testing.T) { testutil.Ok(t, err) cb.CacheExists("test", cache, func(string) bool { return false }, DefaultCachingBucketConfig()) - file := "/block123" + metaFilenameSuffix + file := "/random_object" verifyExists(t, cb, file, false, false, "test") testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) @@ -493,7 +493,7 @@ func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fro } } -func TestGetMetafile(t *testing.T) { +func TestGet(t *testing.T) { inmem := objstore.NewInMemBucket() // We reuse cache between tests (!) @@ -504,8 +504,9 @@ func TestGetMetafile(t *testing.T) { cb.CacheGet("metafile", cache, matchAll, DefaultCachingBucketConfig()) cb.CacheExists("metafile", cache, matchAll, DefaultCachingBucketConfig()) - file := "/block123" + metaFilenameSuffix - verifyGet(t, cb, file, nil, false, "test") + file := "/random_object" + verifyGet(t, cb, file, nil, false, "metafile") + verifyExists(t, cb, file, false, true, "metafile") data := []byte("hello world") testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data))) @@ -550,4 +551,45 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte } } +func TestObjectSize(t *testing.T) { + inmem := objstore.NewInMemBucket() + + // We reuse cache between tests (!) + cache := newMockCache() + + cb, err := NewCachingBucket(inmem, nil, nil) + testutil.Ok(t, err) + cb.CacheObjectSize("test", cache, matchAll, DefaultCachingBucketConfig()) + + file := "/random_object" + verifyObjectSize(t, cb, file, -1, false, "test") + verifyObjectSize(t, cb, file, -1, false, "test") // ObjectSize doesn't cache non-existant files. 
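(This follows from cachedObjectSize: an entry is stored only after Bucket.ObjectSize succeeds, so there is no negative caching and a missing object is looked up in the bucket every time.)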
+ + data := []byte("hello world") + testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data))) + + verifyObjectSize(t, cb, file, len(data), false, "test") + verifyObjectSize(t, cb, file, len(data), true, "test") +} + +func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, label string) { + t.Helper() + hitsBefore := int(promtest.ToFloat64(cb.objectSizeHits.WithLabelValues(label))) + + length, err := cb.ObjectSize(context.Background(), file) + if expectedLength < 0 { + testutil.Assert(t, cb.IsObjNotFoundErr(err)) + } else { + testutil.Ok(t, err) + testutil.Equals(t, uint64(expectedLength), length) + + hitsAfter := int(promtest.ToFloat64(cb.objectSizeHits.WithLabelValues(label))) + if cacheUsed { + testutil.Equals(t, 1, hitsAfter-hitsBefore) + } else { + testutil.Equals(t, 0, hitsAfter-hitsBefore) + } + } +} + func matchAll(string) bool { return true } From 23654ac4ae41400171ca6e5403aa66d28a30af51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Fri, 8 May 2020 23:03:59 +0200 Subject: [PATCH 07/25] Lint feedback. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket_test.go | 56 +++++++++++++------------- 1 file changed, 27 insertions(+), 29 deletions(-) diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 077966a1d2..a6f03af117 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -23,6 +23,8 @@ import ( "github.com/thanos-io/thanos/pkg/testutil" ) +const testFilename = "/random_object" + func TestChunksCaching(t *testing.T) { length := int64(1024 * 1024) subrangeSize := int64(16000) // All tests are based on this value. @@ -444,18 +446,17 @@ func TestExists(t *testing.T) { testutil.Ok(t, err) cb.CacheExists("test", cache, matchAll, config) - file := "/random_object" - verifyExists(t, cb, file, false, false, "test") + verifyExists(t, cb, testFilename, false, false, "test") - testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) - verifyExists(t, cb, file, false, true, "test") // Reused cache result. + testutil.Ok(t, inmem.Upload(context.Background(), testFilename, strings.NewReader("hej"))) + verifyExists(t, cb, testFilename, false, true, "test") // Reused cache result. cache.flush() - verifyExists(t, cb, file, true, false, "test") + verifyExists(t, cb, testFilename, true, false, "test") - testutil.Ok(t, inmem.Delete(context.Background(), file)) - verifyExists(t, cb, file, true, true, "test") // Reused cache result. + testutil.Ok(t, inmem.Delete(context.Background(), testFilename)) + verifyExists(t, cb, testFilename, true, true, "test") // Reused cache result. 
cache.flush() - verifyExists(t, cb, file, false, false, "test") + verifyExists(t, cb, testFilename, false, false, "test") } func TestExistsCachingDisabled(t *testing.T) { @@ -468,14 +469,13 @@ func TestExistsCachingDisabled(t *testing.T) { testutil.Ok(t, err) cb.CacheExists("test", cache, func(string) bool { return false }, DefaultCachingBucketConfig()) - file := "/random_object" - verifyExists(t, cb, file, false, false, "test") + verifyExists(t, cb, testFilename, false, false, "test") - testutil.Ok(t, inmem.Upload(context.Background(), file, strings.NewReader("hej"))) - verifyExists(t, cb, file, true, false, "test") + testutil.Ok(t, inmem.Upload(context.Background(), testFilename, strings.NewReader("hej"))) + verifyExists(t, cb, testFilename, true, false, "test") - testutil.Ok(t, inmem.Delete(context.Background(), file)) - verifyExists(t, cb, file, false, false, "test") + testutil.Ok(t, inmem.Delete(context.Background(), testFilename)) + verifyExists(t, cb, testFilename, false, false, "test") } func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, label string) { @@ -504,22 +504,21 @@ func TestGet(t *testing.T) { cb.CacheGet("metafile", cache, matchAll, DefaultCachingBucketConfig()) cb.CacheExists("metafile", cache, matchAll, DefaultCachingBucketConfig()) - file := "/random_object" - verifyGet(t, cb, file, nil, false, "metafile") - verifyExists(t, cb, file, false, true, "metafile") + verifyGet(t, cb, testFilename, nil, false, "metafile") + verifyExists(t, cb, testFilename, false, true, "metafile") data := []byte("hello world") - testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data))) + testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data))) // Even if file is now uploaded, old data is served from cache. - verifyGet(t, cb, file, nil, true, "metafile") - verifyExists(t, cb, file, false, true, "metafile") + verifyGet(t, cb, testFilename, nil, true, "metafile") + verifyExists(t, cb, testFilename, false, true, "metafile") cache.flush() - verifyGet(t, cb, file, data, false, "metafile") - verifyGet(t, cb, file, data, true, "metafile") - verifyExists(t, cb, file, true, true, "metafile") + verifyGet(t, cb, testFilename, data, false, "metafile") + verifyGet(t, cb, testFilename, data, true, "metafile") + verifyExists(t, cb, testFilename, true, true, "metafile") } func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, label string) { @@ -561,15 +560,14 @@ func TestObjectSize(t *testing.T) { testutil.Ok(t, err) cb.CacheObjectSize("test", cache, matchAll, DefaultCachingBucketConfig()) - file := "/random_object" - verifyObjectSize(t, cb, file, -1, false, "test") - verifyObjectSize(t, cb, file, -1, false, "test") // ObjectSize doesn't cache non-existant files. + verifyObjectSize(t, cb, testFilename, -1, false, "test") + verifyObjectSize(t, cb, testFilename, -1, false, "test") // ObjectSize doesn't cache non-existant files. 
data := []byte("hello world") - testutil.Ok(t, inmem.Upload(context.Background(), file, bytes.NewBuffer(data))) + testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data))) - verifyObjectSize(t, cb, file, len(data), false, "test") - verifyObjectSize(t, cb, file, len(data), true, "test") + verifyObjectSize(t, cb, testFilename, len(data), false, "test") + verifyObjectSize(t, cb, testFilename, len(data), true, "test") } func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, label string) { From 374e771b8da4168c8d110cd6ae04cd9a5270941e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Sat, 9 May 2020 13:06:47 +0200 Subject: [PATCH 08/25] Use single set of metrics for all operations. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 178 ++++++++++--------------- pkg/store/cache/caching_bucket_test.go | 59 ++++---- 2 files changed, 104 insertions(+), 133 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index a4e031d404..1d372dfa79 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -81,33 +81,21 @@ type CachingBucket struct { logger log.Logger - getRangeConfigs map[string]*operationConfig requestedGetRangeBytes *prometheus.CounterVec fetchedGetRangeBytes *prometheus.CounterVec refetchedGetRangeBytes *prometheus.CounterVec - objectSizeConfigs map[string]*operationConfig - objectSizeRequests *prometheus.CounterVec - objectSizeHits *prometheus.CounterVec - - iterConfigs map[string]*operationConfig - iterRequests *prometheus.CounterVec - iterCacheHits *prometheus.CounterVec - - existsConfigs map[string]*operationConfig - existsRequests *prometheus.CounterVec - existsCacheHits *prometheus.CounterVec - - getConfigs map[string]*operationConfig - getRequests *prometheus.CounterVec - getCacheHits *prometheus.CounterVec + operationConfigs map[string][]*operationConfig + operationRequests *prometheus.CounterVec + operationHits *prometheus.CounterVec } type operationConfig struct { CachingBucketConfig - matcher func(name string) bool - cache cache.Cache + configName string + matcher func(name string) bool + cache cache.Cache } // NewCachingBucket creates caching bucket with no configuration. 
Various "Cache*" methods configure @@ -122,11 +110,7 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis Bucket: b, logger: logger, - getConfigs: map[string]*operationConfig{}, - getRangeConfigs: map[string]*operationConfig{}, - existsConfigs: map[string]*operationConfig{}, - iterConfigs: map[string]*operationConfig{}, - objectSizeConfigs: map[string]*operationConfig{}, + operationConfigs: map[string][]*operationConfig{}, requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_requested_bytes_total", @@ -141,97 +125,73 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis Help: "Total number of bytes re-fetched from storage because of GetRange operation, despite being in cache already.", }, []string{"origin", config}), - objectSizeRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_objectsize_requests_total", - Help: "Number of object size requests for objects.", - }, []string{config}), - objectSizeHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_objectsize_hits_total", - Help: "Number of object size hits for objects.", - }, []string{config}), - - iterRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_iter_requests_total", - Help: "Number of Iter requests for directories.", - }, []string{config}), - iterCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_iter_cache_hits_total", - Help: "Number of Iter requests served from cache.", - }, []string{config}), - - existsRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_exists_requests_total", - Help: "Number of Exists requests.", - }, []string{config}), - existsCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_exists_cache_hits_total", - Help: "Number of Exists requests served from cache.", - }, []string{config}), - - getRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_get_requests_total", - Help: "Number of Get requests.", - }, []string{config}), - getCacheHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ - Name: "thanos_store_bucket_cache_get_hits_total", - Help: "Number of Get requests served from cache with data.", - }, []string{config}), + operationRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_bucket_cache_operation_requests_total", + Help: "Number of requested operations matching given config.", + }, []string{"operation", "config"}), + operationHits: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_bucket_cache_operation_hits_total", + Help: "Number of operations served from cache for given config.", + }, []string{"operation", "config"}), } return cb, nil } -func newCacheConfig(cfg CachingBucketConfig, matcher func(string) bool, cache cache.Cache) *operationConfig { +func newCacheConfig(cfg CachingBucketConfig, configName string, matcher func(string) bool, cache cache.Cache) *operationConfig { if matcher == nil { panic("matcher") } if cache == nil { panic("cache") } + if configName == "" { + panic("empty configName") + } return &operationConfig{ CachingBucketConfig: cfg, + configName: configName, matcher: matcher, cache: cache, } } -func (cb *CachingBucket) CacheIter(labelName 
string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.iterConfigs[labelName] = newCacheConfig(cfg, matcher, cache) - cb.iterRequests.WithLabelValues(labelName) - cb.iterCacheHits.WithLabelValues(labelName) +func (cb *CachingBucket) addOperationConfig(operationName, configName string, cfg *operationConfig) { + cb.operationConfigs[operationName] = append(cb.operationConfigs[operationName], cfg) + cb.operationRequests.WithLabelValues(operationName, configName) + cb.operationHits.WithLabelValues(operationName, configName) +} + +func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { + cb.addOperationConfig("iter", configName, newCacheConfig(cfg, configName, matcher, cache)) } -func (cb *CachingBucket) CacheExists(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.existsConfigs[labelName] = newCacheConfig(cfg, matcher, cache) - cb.existsRequests.WithLabelValues(labelName) - cb.existsCacheHits.WithLabelValues(labelName) +func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { + cb.addOperationConfig("exists", configName, newCacheConfig(cfg, configName, matcher, cache)) } -func (cb *CachingBucket) CacheGet(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.getConfigs[labelName] = newCacheConfig(cfg, matcher, cache) - cb.getRequests.WithLabelValues(labelName) - cb.getCacheHits.WithLabelValues(labelName) +func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { + cb.addOperationConfig("get", configName, newCacheConfig(cfg, configName, matcher, cache)) } -func (cb *CachingBucket) CacheGetRange(labelName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.getRangeConfigs[labelName] = newCacheConfig(cfg, matcher, cache) - cb.requestedGetRangeBytes.WithLabelValues(labelName) - cb.fetchedGetRangeBytes.WithLabelValues(originCache, labelName) - cb.fetchedGetRangeBytes.WithLabelValues(originBucket, labelName) - cb.refetchedGetRangeBytes.WithLabelValues(originCache, labelName) +func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { + cb.addOperationConfig("getrange", configName, newCacheConfig(cfg, configName, matcher, cache)) + + cb.requestedGetRangeBytes.WithLabelValues(configName) + cb.fetchedGetRangeBytes.WithLabelValues(originCache, configName) + cb.fetchedGetRangeBytes.WithLabelValues(originBucket, configName) + cb.refetchedGetRangeBytes.WithLabelValues(originCache, configName) } -func (cb *CachingBucket) CacheObjectSize(labelName string, cache cache.Cache, matcher func(name string) bool, cfg CachingBucketConfig) { - cb.objectSizeConfigs[labelName] = newCacheConfig(cfg, matcher, cache) - cb.objectSizeRequests.WithLabelValues(labelName) - cb.objectSizeHits.WithLabelValues(labelName) +func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, cfg CachingBucketConfig) { + cb.addOperationConfig("objectsize", configName, newCacheConfig(cfg, configName, matcher, cache)) } -func (cb *CachingBucket) findCacheConfig(configs map[string]*operationConfig, objectName string) (string, *operationConfig) { - for n, cfg := range configs { +func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) 
(configName string, cfg *operationConfig) { + for _, cfg := range configs { if cfg.matcher(objectName) { - return n, cfg + return cfg.configName, cfg } } return "", nil @@ -258,12 +218,12 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur } func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error { - lname, cfg := cb.findCacheConfig(cb.iterConfigs, dir) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["iter"], dir) if cfg == nil { return cb.Bucket.Iter(ctx, dir, f) } - cb.iterRequests.WithLabelValues(lname).Inc() + cb.operationRequests.WithLabelValues("iter", cfgName).Inc() key := cachingKeyIter(dir) @@ -271,7 +231,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er if data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { - cb.iterCacheHits.WithLabelValues(lname).Inc() + cb.operationHits.WithLabelValues("iter", cfgName).Inc() for _, n := range list { err = f(n) @@ -327,12 +287,12 @@ func decodeIterResult(data []byte) ([]string, error) { } func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { - lname, cfg := cb.findCacheConfig(cb.existsConfigs, name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["exists"], name) if cfg == nil { return cb.Bucket.Exists(ctx, name) } - cb.existsRequests.WithLabelValues(lname).Inc() + cb.operationRequests.WithLabelValues("exists", cfgName).Inc() key := cachingKeyExists(name) hits := cfg.cache.Fetch(ctx, []string{key}) @@ -340,10 +300,10 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) if ex := hits[key]; ex != nil { switch string(ex) { case existsTrue: - cb.existsCacheHits.WithLabelValues(lname).Inc() + cb.operationHits.WithLabelValues("exists", cfgName).Inc() return true, nil case existsFalse: - cb.existsCacheHits.WithLabelValues(lname).Inc() + cb.operationHits.WithLabelValues("exists", cfgName).Inc() return false, nil default: level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) @@ -378,25 +338,25 @@ func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey s } func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - lname, cfg := cb.findCacheConfig(cb.getConfigs, name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["get"], name) if cfg == nil { return cb.Bucket.Get(ctx, name) } - cb.getRequests.WithLabelValues(lname).Inc() + cb.operationRequests.WithLabelValues("get", cfgName).Inc() key := cachingKeyContent(name) existsKey := cachingKeyExists(name) hits := cfg.cache.Fetch(ctx, []string{key, existsKey}) if hits[key] != nil { - cb.getCacheHits.WithLabelValues(lname).Inc() + cb.operationHits.WithLabelValues("get", cfgName).Inc() return ioutil.NopCloser(bytes.NewReader(hits[key])), nil } // If we know that file doesn't exist, we can return that. Useful for deletion marks. 
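(The entry consulted here is the one written by storeExistsCacheEntry, so a single cached "false" answers both Exists and Get for the same object.)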
if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse { - cb.getCacheHits.WithLabelValues(lname).Inc() + cb.operationHits.WithLabelValues("get", cfgName).Inc() return nil, errObjNotFound } @@ -435,7 +395,7 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length return cb.Bucket.GetRange(ctx, name, off, length) } - lname, cfg := cb.findCacheConfig(cb.getRangeConfigs, name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["getrange"], name) if cfg == nil { return cb.Bucket.GetRange(ctx, name, off, length) } @@ -445,13 +405,13 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length err error ) tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) { - r, err = cb.cachedGetRange(ctx, name, off, length, lname, cfg) + r, err = cb.cachedGetRange(ctx, name, off, length, cfgName, cfg) }) return r, err } func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { - lname, cfg := cb.findCacheConfig(cb.objectSizeConfigs, name) + lname, cfg := cb.findCacheConfig(cb.operationConfigs["objectsize"], name) if cfg == nil { return cb.Bucket.ObjectSize(ctx, name) } @@ -459,14 +419,14 @@ func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, e return cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL) } -func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, labelName string, cache cache.Cache, ttl time.Duration) (uint64, error) { +func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgName string, cache cache.Cache, ttl time.Duration) (uint64, error) { key := cachingKeyObjectSize(name) - cb.objectSizeRequests.WithLabelValues(labelName).Add(1) + cb.operationRequests.WithLabelValues("objectsize", cfgName).Add(1) hits := cache.Fetch(ctx, []string{key}) if s := hits[key]; len(s) == 8 { - cb.objectSizeHits.WithLabelValues(labelName).Add(1) + cb.operationHits.WithLabelValues("objectsize", cfgName).Add(1) return binary.BigEndian.Uint64(s), nil } @@ -482,10 +442,11 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, labe return size, nil } -func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, lname string, cfg *operationConfig) (io.ReadCloser, error) { - cb.requestedGetRangeBytes.WithLabelValues(lname).Add(float64(length)) +func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *operationConfig) (io.ReadCloser, error) { + cb.operationRequests.WithLabelValues("getrange", cfgName) + cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) - size, err := cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL) + size, err := cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.ObjectSizeTTL) if err != nil { return nil, errors.Wrapf(err, "failed to get size of object: %s", name) } @@ -515,11 +476,13 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset offsetKeys := make(map[int64]string, numSubranges) keys := make([]string, 0, numSubranges) + totalRequestedBytes := int64(0) for off := startRange; off < endRange; off += cfg.SubrangeSize { end := off + cfg.SubrangeSize if end > int64(size) { end = int64(size) } + totalRequestedBytes += (end - off) k := cachingKeyObjectSubrange(name, off, end) keys = append(keys, k) @@ -527,17 +490,20 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset } // Try to get all 
subranges from the cache. + totalCachedBytes := int64(0) hits := cfg.cache.Fetch(ctx, keys) for _, b := range hits { - cb.fetchedGetRangeBytes.WithLabelValues(originCache, lname).Add(float64(len(b))) + totalCachedBytes += int64(len(b)) + cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) } + cb.operationHits.WithLabelValues("getrange", cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) if len(hits) < len(keys) { if hits == nil { hits = map[string][]byte{} } - err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, lname, cfg) + err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cfg) if err != nil { return nil, err } diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index a6f03af117..081a00584d 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -382,16 +382,17 @@ func TestCachedIter(t *testing.T) { cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cb.CacheIter("dirs", cache, func(string) bool { return true }, config) + const cfgName = "dirs" + cb.CacheIter(cfgName, cache, func(string) bool { return true }, config) - verifyIter(t, cb, allFiles, false) + verifyIter(t, cb, allFiles, false, cfgName) testutil.Ok(t, inmem.Upload(context.Background(), "/file-5", strings.NewReader("nazdar"))) - verifyIter(t, cb, allFiles, true) // Iter returns old response. + verifyIter(t, cb, allFiles, true, cfgName) // Iter returns old response. cache.flush() allFiles = append(allFiles, "/file-5") - verifyIter(t, cb, allFiles, false) + verifyIter(t, cb, allFiles, false, cfgName) cache.flush() @@ -403,16 +404,16 @@ func TestCachedIter(t *testing.T) { })) // Nothing cached now. 
- verifyIter(t, cb, allFiles, false) + verifyIter(t, cb, allFiles, false, cfgName) } -func verifyIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool) { - hitsBefore := int(promtest.ToFloat64(cb.iterCacheHits)) +func verifyIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool, cfgName string) { + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("iter", cfgName))) col := iterCollector{} testutil.Ok(t, cb.Iter(context.Background(), "/", col.collect)) - hitsAfter := int(promtest.ToFloat64(cb.iterCacheHits)) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("iter", cfgName))) sort.Strings(col.items) testutil.Equals(t, expectedFiles, col.items) @@ -467,24 +468,26 @@ func TestExistsCachingDisabled(t *testing.T) { cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cb.CacheExists("test", cache, func(string) bool { return false }, DefaultCachingBucketConfig()) - verifyExists(t, cb, testFilename, false, false, "test") + const cfgName = "test" + cb.CacheExists(cfgName, cache, func(string) bool { return false }, DefaultCachingBucketConfig()) + + verifyExists(t, cb, testFilename, false, false, cfgName) testutil.Ok(t, inmem.Upload(context.Background(), testFilename, strings.NewReader("hej"))) - verifyExists(t, cb, testFilename, true, false, "test") + verifyExists(t, cb, testFilename, true, false, cfgName) testutil.Ok(t, inmem.Delete(context.Background(), testFilename)) - verifyExists(t, cb, testFilename, false, false, "test") + verifyExists(t, cb, testFilename, false, false, cfgName) } -func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, label string) { +func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.existsCacheHits.WithLabelValues(label))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("exists", cfgName))) ok, err := cb.Exists(context.Background(), file) testutil.Ok(t, err) testutil.Equals(t, exists, ok) - hitsAfter := int(promtest.ToFloat64(cb.existsCacheHits.WithLabelValues(label))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("exists", cfgName))) if fromCache { testutil.Equals(t, 1, hitsAfter-hitsBefore) @@ -521,14 +524,14 @@ func TestGet(t *testing.T) { verifyExists(t, cb, testFilename, true, true, "metafile") } -func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, label string) { - hitsBefore := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) +func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, cfgName string) { + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) r, err := cb.Get(context.Background(), file) if expectedData == nil { testutil.Assert(t, cb.IsObjNotFoundErr(err)) - hitsAfter := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -541,7 +544,7 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte testutil.Ok(t, err) testutil.Equals(t, expectedData, data) - hitsAfter := int(promtest.ToFloat64(cb.getCacheHits.WithLabelValues(label))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) if 
cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -558,21 +561,23 @@ func TestObjectSize(t *testing.T) { cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cb.CacheObjectSize("test", cache, matchAll, DefaultCachingBucketConfig()) - verifyObjectSize(t, cb, testFilename, -1, false, "test") - verifyObjectSize(t, cb, testFilename, -1, false, "test") // ObjectSize doesn't cache non-existant files. + const cfgName = "test" + cb.CacheObjectSize(cfgName, cache, matchAll, DefaultCachingBucketConfig()) + + verifyObjectSize(t, cb, testFilename, -1, false, cfgName) + verifyObjectSize(t, cb, testFilename, -1, false, cfgName) // ObjectSize doesn't cache non-existant files. data := []byte("hello world") testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data))) - verifyObjectSize(t, cb, testFilename, len(data), false, "test") - verifyObjectSize(t, cb, testFilename, len(data), true, "test") + verifyObjectSize(t, cb, testFilename, len(data), false, cfgName) + verifyObjectSize(t, cb, testFilename, len(data), true, cfgName) } -func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, label string) { +func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.objectSizeHits.WithLabelValues(label))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("objectsize", cfgName))) length, err := cb.ObjectSize(context.Background(), file) if expectedLength < 0 { @@ -581,7 +586,7 @@ func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLeng testutil.Ok(t, err) testutil.Equals(t, uint64(expectedLength), length) - hitsAfter := int(promtest.ToFloat64(cb.objectSizeHits.WithLabelValues(label))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("objectsize", cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { From 8531c43eacc6d678f4af1b26db107b64cf8656a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Sat, 9 May 2020 13:14:05 +0200 Subject: [PATCH 09/25] Constants. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 50 ++++++++++++++----------- pkg/store/cache/caching_bucket_test.go | 52 ++++++++++++++------------ 2 files changed, 56 insertions(+), 46 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 1d372dfa79..5c21ea8d16 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -36,6 +36,12 @@ const ( existsTrue = "true" existsFalse = "false" + + operationGet = "get" + operationGetRange = "getrange" + operationIter = "iter" + operationExists = "exists" + operationObjectSize = "objectsize" ) var errObjNotFound = errors.Errorf("object not found") @@ -164,19 +170,19 @@ func (cb *CachingBucket) addOperationConfig(operationName, configName string, cf } func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig("iter", configName, newCacheConfig(cfg, configName, matcher, cache)) + cb.addOperationConfig(operationIter, configName, newCacheConfig(cfg, configName, matcher, cache)) } func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig("exists", configName, newCacheConfig(cfg, configName, matcher, cache)) + cb.addOperationConfig(operationExists, configName, newCacheConfig(cfg, configName, matcher, cache)) } func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig("get", configName, newCacheConfig(cfg, configName, matcher, cache)) + cb.addOperationConfig(operationGet, configName, newCacheConfig(cfg, configName, matcher, cache)) } func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig("getrange", configName, newCacheConfig(cfg, configName, matcher, cache)) + cb.addOperationConfig(operationGetRange, configName, newCacheConfig(cfg, configName, matcher, cache)) cb.requestedGetRangeBytes.WithLabelValues(configName) cb.fetchedGetRangeBytes.WithLabelValues(originCache, configName) @@ -185,7 +191,7 @@ func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, mat } func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig("objectsize", configName, newCacheConfig(cfg, configName, matcher, cache)) + cb.addOperationConfig(operationObjectSize, configName, newCacheConfig(cfg, configName, matcher, cache)) } func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) (configName string, cfg *operationConfig) { @@ -218,12 +224,12 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur } func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["iter"], dir) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationIter], dir) if cfg == nil { return cb.Bucket.Iter(ctx, dir, f) } - cb.operationRequests.WithLabelValues("iter", cfgName).Inc() + cb.operationRequests.WithLabelValues(operationIter, cfgName).Inc() key := cachingKeyIter(dir) @@ -231,7 +237,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er if 
data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { - cb.operationHits.WithLabelValues("iter", cfgName).Inc() + cb.operationHits.WithLabelValues(operationIter, cfgName).Inc() for _, n := range list { err = f(n) @@ -287,12 +293,12 @@ func decodeIterResult(data []byte) ([]string, error) { } func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["exists"], name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationExists], name) if cfg == nil { return cb.Bucket.Exists(ctx, name) } - cb.operationRequests.WithLabelValues("exists", cfgName).Inc() + cb.operationRequests.WithLabelValues(operationExists, cfgName).Inc() key := cachingKeyExists(name) hits := cfg.cache.Fetch(ctx, []string{key}) @@ -300,10 +306,10 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) if ex := hits[key]; ex != nil { switch string(ex) { case existsTrue: - cb.operationHits.WithLabelValues("exists", cfgName).Inc() + cb.operationHits.WithLabelValues(operationExists, cfgName).Inc() return true, nil case existsFalse: - cb.operationHits.WithLabelValues("exists", cfgName).Inc() + cb.operationHits.WithLabelValues(operationExists, cfgName).Inc() return false, nil default: level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) @@ -338,25 +344,25 @@ func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey s } func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["get"], name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationGet], name) if cfg == nil { return cb.Bucket.Get(ctx, name) } - cb.operationRequests.WithLabelValues("get", cfgName).Inc() + cb.operationRequests.WithLabelValues(operationGet, cfgName).Inc() key := cachingKeyContent(name) existsKey := cachingKeyExists(name) hits := cfg.cache.Fetch(ctx, []string{key, existsKey}) if hits[key] != nil { - cb.operationHits.WithLabelValues("get", cfgName).Inc() + cb.operationHits.WithLabelValues(operationGet, cfgName).Inc() return ioutil.NopCloser(bytes.NewReader(hits[key])), nil } // If we know that file doesn't exist, we can return that. Useful for deletion marks. 
if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse { - cb.operationHits.WithLabelValues("get", cfgName).Inc() + cb.operationHits.WithLabelValues(operationGet, cfgName).Inc() return nil, errObjNotFound } @@ -395,7 +401,7 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length return cb.Bucket.GetRange(ctx, name, off, length) } - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs["getrange"], name) + cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationGetRange], name) if cfg == nil { return cb.Bucket.GetRange(ctx, name, off, length) } @@ -411,7 +417,7 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length } func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { - lname, cfg := cb.findCacheConfig(cb.operationConfigs["objectsize"], name) + lname, cfg := cb.findCacheConfig(cb.operationConfigs[operationObjectSize], name) if cfg == nil { return cb.Bucket.ObjectSize(ctx, name) } @@ -422,11 +428,11 @@ func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, e func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgName string, cache cache.Cache, ttl time.Duration) (uint64, error) { key := cachingKeyObjectSize(name) - cb.operationRequests.WithLabelValues("objectsize", cfgName).Add(1) + cb.operationRequests.WithLabelValues(operationObjectSize, cfgName).Inc() hits := cache.Fetch(ctx, []string{key}) if s := hits[key]; len(s) == 8 { - cb.operationHits.WithLabelValues("objectsize", cfgName).Add(1) + cb.operationHits.WithLabelValues(operationObjectSize, cfgName).Inc() return binary.BigEndian.Uint64(s), nil } @@ -443,7 +449,7 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgN } func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *operationConfig) (io.ReadCloser, error) { - cb.operationRequests.WithLabelValues("getrange", cfgName) + cb.operationRequests.WithLabelValues(operationGetRange, cfgName) cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) size, err := cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.ObjectSizeTTL) @@ -496,7 +502,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset totalCachedBytes += int64(len(b)) cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) } - cb.operationHits.WithLabelValues("getrange", cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) + cb.operationHits.WithLabelValues(operationGetRange, cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) if len(hits) < len(keys) { if hits == nil { diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 081a00584d..249df573e2 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -408,12 +408,12 @@ func TestCachedIter(t *testing.T) { } func verifyIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool, cfgName string) { - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("iter", cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationIter, cfgName))) col := iterCollector{} testutil.Ok(t, cb.Iter(context.Background(), "/", col.collect)) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("iter", cfgName))) + hitsAfter := 
int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationIter, cfgName))) sort.Strings(col.items) testutil.Equals(t, expectedFiles, col.items) @@ -445,19 +445,21 @@ func TestExists(t *testing.T) { cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cb.CacheExists("test", cache, matchAll, config) - verifyExists(t, cb, testFilename, false, false, "test") + const cfgName = "test" + cb.CacheExists(cfgName, cache, matchAll, config) + + verifyExists(t, cb, testFilename, false, false, cfgName) testutil.Ok(t, inmem.Upload(context.Background(), testFilename, strings.NewReader("hej"))) - verifyExists(t, cb, testFilename, false, true, "test") // Reused cache result. + verifyExists(t, cb, testFilename, false, true, cfgName) // Reused cache result. cache.flush() - verifyExists(t, cb, testFilename, true, false, "test") + verifyExists(t, cb, testFilename, true, false, cfgName) testutil.Ok(t, inmem.Delete(context.Background(), testFilename)) - verifyExists(t, cb, testFilename, true, true, "test") // Reused cache result. + verifyExists(t, cb, testFilename, true, true, cfgName) // Reused cache result. cache.flush() - verifyExists(t, cb, testFilename, false, false, "test") + verifyExists(t, cb, testFilename, false, false, cfgName) } func TestExistsCachingDisabled(t *testing.T) { @@ -483,11 +485,11 @@ func TestExistsCachingDisabled(t *testing.T) { func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("exists", cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationExists, cfgName))) ok, err := cb.Exists(context.Background(), file) testutil.Ok(t, err) testutil.Equals(t, exists, ok) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("exists", cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationExists, cfgName))) if fromCache { testutil.Equals(t, 1, hitsAfter-hitsBefore) @@ -504,34 +506,36 @@ func TestGet(t *testing.T) { cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cb.CacheGet("metafile", cache, matchAll, DefaultCachingBucketConfig()) - cb.CacheExists("metafile", cache, matchAll, DefaultCachingBucketConfig()) - verifyGet(t, cb, testFilename, nil, false, "metafile") - verifyExists(t, cb, testFilename, false, true, "metafile") + const cfgName = "metafile" + cb.CacheGet(cfgName, cache, matchAll, DefaultCachingBucketConfig()) + cb.CacheExists(cfgName, cache, matchAll, DefaultCachingBucketConfig()) + + verifyGet(t, cb, testFilename, nil, false, cfgName) + verifyExists(t, cb, testFilename, false, true, cfgName) data := []byte("hello world") testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data))) // Even if file is now uploaded, old data is served from cache. 
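	// Editorial note: this staleness is by design. Within the configured content
	// and exists TTLs the cache is authoritative, so the test only observes the
	// uploaded data after cache.flush() below.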
- verifyGet(t, cb, testFilename, nil, true, "metafile") - verifyExists(t, cb, testFilename, false, true, "metafile") + verifyGet(t, cb, testFilename, nil, true, cfgName) + verifyExists(t, cb, testFilename, false, true, cfgName) cache.flush() - verifyGet(t, cb, testFilename, data, false, "metafile") - verifyGet(t, cb, testFilename, data, true, "metafile") - verifyExists(t, cb, testFilename, true, true, "metafile") + verifyGet(t, cb, testFilename, data, false, cfgName) + verifyGet(t, cb, testFilename, data, true, cfgName) + verifyExists(t, cb, testFilename, true, true, cfgName) } func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, cfgName string) { - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) r, err := cb.Get(context.Background(), file) if expectedData == nil { testutil.Assert(t, cb.IsObjNotFoundErr(err)) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -544,7 +548,7 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte testutil.Ok(t, err) testutil.Equals(t, expectedData, data) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("get", cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -577,7 +581,7 @@ func TestObjectSize(t *testing.T) { func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("objectsize", cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationObjectSize, cfgName))) length, err := cb.ObjectSize(context.Background(), file) if expectedLength < 0 { @@ -586,7 +590,7 @@ func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLeng testutil.Ok(t, err) testutil.Equals(t, uint64(expectedLength), length) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues("objectsize", cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationObjectSize, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { From 9284a8f59e379a6097b69dc74d4ea4a63ffae38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Sat, 9 May 2020 13:48:24 +0200 Subject: [PATCH 10/25] Use operation specific config. Generic configuration is only for user. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 213 +++++++++++----------- pkg/store/cache/caching_bucket_factory.go | 51 +++++- pkg/store/cache/caching_bucket_test.go | 24 +-- 3 files changed, 160 insertions(+), 128 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 5c21ea8d16..0c22df5010 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -46,41 +46,6 @@ const ( var errObjNotFound = errors.Errorf("object not found") -type CachingBucketConfig struct { - // Basic unit used to cache subranges in GetRange method. 
- SubrangeSize int64 `yaml:"subrange_size"` - - // Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited. - MaxGetRangeRequests int `yaml:"max_get_range_requests"` - SubrangeTTL time.Duration `yaml:"subrange_ttl"` - - // TTLs for various cache items. - ObjectSizeTTL time.Duration `yaml:"object_size_ttl"` - - // How long to cache result of Iter call. - IterTTL time.Duration `yaml:"iter_ttl"` - - // Config for Exists and Get opertions. - ExistsTTL time.Duration `yaml:"exists_ttl"` - DoesntExistTTL time.Duration `yaml:"doesnt_exist_ttl"` - ContentTTL time.Duration `yaml:"content_ttl"` -} - -func DefaultCachingBucketConfig() CachingBucketConfig { - return CachingBucketConfig{ - SubrangeSize: 16000, // Equal to max chunk size. - ObjectSizeTTL: 24 * time.Hour, - SubrangeTTL: 24 * time.Hour, - MaxGetRangeRequests: 3, - - IterTTL: 5 * time.Minute, - - ExistsTTL: 10 * time.Minute, - DoesntExistTTL: 3 * time.Minute, - ContentTTL: 1 * time.Hour, - } -} - // Bucket implementation that provides some caching features, using knowledge about how Thanos accesses data. type CachingBucket struct { objstore.Bucket @@ -96,14 +61,33 @@ type CachingBucket struct { operationHits *prometheus.CounterVec } +// Generic config for single operation. type operationConfig struct { - CachingBucketConfig - configName string matcher func(name string) bool cache cache.Cache + opConfig interface{} +} + +// Operation-specific configs. +type iterConfig time.Duration + +type existsConfig struct { + existsTTL, doesntExistTTL time.Duration +} + +type getConfig struct { + contentTTL, existsTTL, doesntExistTTL time.Duration } +type getRangeConfig struct { + subrangeSize int64 + objectSizeTTL, subrangeTTL time.Duration + maxSubRequests int +} + +type objectSizeCfg time.Duration + // NewCachingBucket creates caching bucket with no configuration. Various "Cache*" methods configure // this bucket to cache results of individual bucket methods. Configuration must be set before // caching bucket is used by other objects. 
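[Editorial aside, not part of the patch] To make the new pattern concrete — a generic operationConfig per operation name, carrying the operation-specific settings as an interface{} payload that is recovered by a type assertion at the call site — here is a minimal, self-contained sketch. The names registry, entry and iterCfg are illustrative only and do not appear in the patch:

package main

import (
	"fmt"
	"time"
)

// entry mirrors operationConfig: a name, a matcher, and an opaque payload.
type entry struct {
	name    string
	matcher func(string) bool
	payload interface{}
}

// iterCfg mirrors iterConfig: just a TTL.
type iterCfg struct{ ttl time.Duration }

func main() {
	// One slice of entries per operation name, as in cb.operationConfigs.
	registry := map[string][]entry{
		"iter": {{
			name:    "dir",
			matcher: func(dir string) bool { return dir == "" },
			payload: iterCfg{ttl: 5 * time.Minute},
		}},
	}

	// Lookup mirrors findCacheConfig: the first matching entry wins.
	for _, e := range registry["iter"] {
		if e.matcher("") {
			// Safe because the registration path always stores an iterCfg
			// payload for "iter"; a mismatch would be a programming error,
			// surfaced early by a panic.
			cfg := e.payload.(iterCfg)
			fmt.Printf("config %q matched, ttl=%s\n", e.name, cfg.ttl)
		}
	}
}

Compared with the earlier shared CachingBucketConfig, each Cache* method now takes exactly the parameters its operation uses, so unrelated knobs (such as a chunk subrange size on an Exists cache) can no longer be set by mistake.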
@@ -144,7 +128,7 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis return cb, nil } -func newCacheConfig(cfg CachingBucketConfig, configName string, matcher func(string) bool, cache cache.Cache) *operationConfig { +func (cb *CachingBucket) addOperationConfig(operationName, configName string, matcher func(string) bool, cache cache.Cache, opConfig interface{}) { if matcher == nil { panic("matcher") } @@ -154,35 +138,48 @@ func newCacheConfig(cfg CachingBucketConfig, configName string, matcher func(str if configName == "" { panic("empty configName") } - - return &operationConfig{ - CachingBucketConfig: cfg, - configName: configName, - matcher: matcher, - cache: cache, + if opConfig == nil { + panic("nil opConfig") } -} -func (cb *CachingBucket) addOperationConfig(operationName, configName string, cfg *operationConfig) { - cb.operationConfigs[operationName] = append(cb.operationConfigs[operationName], cfg) + cb.operationConfigs[operationName] = append(cb.operationConfigs[operationName], &operationConfig{ + configName: configName, + matcher: matcher, + cache: cache, + opConfig: opConfig, + }) + cb.operationRequests.WithLabelValues(operationName, configName) cb.operationHits.WithLabelValues(operationName, configName) } -func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig(operationIter, configName, newCacheConfig(cfg, configName, matcher, cache)) +// CacheIter configures caching of "Iter" operation for matching directories. +func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration) { + cb.addOperationConfig(operationIter, configName, matcher, cache, iterConfig(ttl)) } -func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig(operationExists, configName, newCacheConfig(cfg, configName, matcher, cache)) +// CacheExists configures caching of "Exists" operation for matching files. Negative values are cached as well. +func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, existsTTL, doesntExistTTL time.Duration) { + cb.addOperationConfig(operationExists, configName, matcher, cache, existsConfig{existsTTL, doesntExistTTL}) } -func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig(operationGet, configName, newCacheConfig(cfg, configName, matcher, cache)) +// CacheGet configures caching of "Get" operation for matching files. Content of the object is cached, as well as whether object exists or not. +func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, contentTTL, existsTTL, doesntExistTTL time.Duration) { + cb.addOperationConfig(operationGet, configName, matcher, cache, getConfig{contentTTL: contentTTL, existsTTL: existsTTL, doesntExistTTL: doesntExistTTL}) } -func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig(operationGetRange, configName, newCacheConfig(cfg, configName, matcher, cache)) +// CacheGetRange configures caching of "GetRange" operation. Subranges (aligned on subrange size) are cached individually. +// Since caching operation needs to know the object size to compute correct subranges, object size is cached as well. 
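// (Editorial note: the cached size is what lets the code clamp the final
// subrange. The last subrange of an object is usually shorter than
// subrangeSize, and lastSubrangeOffset/lastSubrangeLength below are derived
// from it.)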
+// Single "GetRange" requests can result in multiple smaller GetRange sub-requests issued on the underlying bucket. +// MaxSubRequests specifies how many such subrequests may be issued. Values <= 0 mean there is no limit (requests +// for adjacent missing subranges are still merged). +func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, subrangeSize int64, objectSizeTTL, subrangeTTL time.Duration, maxSubRequests int) { + cb.addOperationConfig(operationGetRange, configName, matcher, cache, getRangeConfig{ + subrangeSize: subrangeSize, + objectSizeTTL: objectSizeTTL, + subrangeTTL: subrangeTTL, + maxSubRequests: maxSubRequests, + }) cb.requestedGetRangeBytes.WithLabelValues(configName) cb.fetchedGetRangeBytes.WithLabelValues(originCache, configName) @@ -190,17 +187,18 @@ func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, mat cb.refetchedGetRangeBytes.WithLabelValues(originCache, configName) } -func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, cfg CachingBucketConfig) { - cb.addOperationConfig(operationObjectSize, configName, newCacheConfig(cfg, configName, matcher, cache)) +// CacheObjectSize configures caching of "ObjectSize" operation for matching files. +func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, ttl time.Duration) { + cb.addOperationConfig(operationObjectSize, configName, matcher, cache, objectSizeCfg(ttl)) } -func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) (configName string, cfg *operationConfig) { +func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) (configName string, cache cache.Cache, cfg interface{}) { for _, cfg := range configs { if cfg.matcher(objectName) { - return cfg.configName, cfg + return cfg.configName, cfg.cache, cfg.opConfig } } - return "", nil + return "", nil, nil } func (cb *CachingBucket) Name() string { @@ -224,16 +222,17 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur } func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationIter], dir) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationIter], dir) if cfg == nil { return cb.Bucket.Iter(ctx, dir, f) } + iterCfg := cfg.(iterConfig) cb.operationRequests.WithLabelValues(operationIter, cfgName).Inc() key := cachingKeyIter(dir) - data := cfg.cache.Fetch(ctx, []string{key}) + data := cache.Fetch(ctx, []string{key}) if data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { @@ -261,11 +260,11 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er return f(s) }) - remainingTTL := cfg.IterTTL - time.Since(iterTime) + remainingTTL := time.Duration(iterCfg) - time.Since(iterTime) if err == nil && remainingTTL > 0 { data := encodeIterResult(list) if data != nil { - cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) + cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) } } return err @@ -293,15 +292,16 @@ func decodeIterResult(data []byte) ([]string, error) { } func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationExists], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationExists], name) if cfg == 
nil { return cb.Bucket.Exists(ctx, name) } + existsCfg := cfg.(existsConfig) cb.operationRequests.WithLabelValues(operationExists, cfgName).Inc() key := cachingKeyExists(name) - hits := cfg.cache.Fetch(ctx, []string{key}) + hits := cache.Fetch(ctx, []string{key}) if ex := hits[key]; ex != nil { switch string(ex) { @@ -319,42 +319,43 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) existsTime := time.Now() ok, err := cb.Bucket.Exists(ctx, name) if err == nil { - cb.storeExistsCacheEntry(ctx, key, ok, existsTime, cfg) + storeExistsCacheEntry(ctx, key, ok, existsTime, cache, existsCfg.existsTTL, existsCfg.doesntExistTTL) } return ok, err } -func (cb *CachingBucket) storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cfg *operationConfig) { +func storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cache cache.Cache, existsTTL, doesntExistTTL time.Duration) { var ( data []byte ttl time.Duration ) if exists { - ttl = cfg.ExistsTTL - time.Since(ts) + ttl = existsTTL - time.Since(ts) data = []byte(existsTrue) } else { - ttl = cfg.DoesntExistTTL - time.Since(ts) + ttl = doesntExistTTL - time.Since(ts) data = []byte(existsFalse) } if ttl > 0 { - cfg.cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl) + cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl) } } func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationGet], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationGet], name) if cfg == nil { return cb.Bucket.Get(ctx, name) } + getCfg := cfg.(getConfig) cb.operationRequests.WithLabelValues(operationGet, cfgName).Inc() key := cachingKeyContent(name) existsKey := cachingKeyExists(name) - hits := cfg.cache.Fetch(ctx, []string{key, existsKey}) + hits := cache.Fetch(ctx, []string{key, existsKey}) if hits[key] != nil { cb.operationHits.WithLabelValues(operationGet, cfgName).Inc() return ioutil.NopCloser(bytes.NewReader(hits[key])), nil @@ -371,7 +372,7 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e if err != nil { if cb.Bucket.IsObjNotFoundErr(err) { // Cache that object doesn't exist. 
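// Editorial note: in storeExistsCacheEntry the TTL counts from when the bucket
// call started (the ts argument), not from the store time, so a slow Exists or
// Get cannot silently extend the freshness window; if the remaining TTL is
// already non-positive, the entry is simply not stored.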
- cb.storeExistsCacheEntry(ctx, existsKey, false, getTime, cfg) + storeExistsCacheEntry(ctx, existsKey, false, getTime, cache, getCfg.existsTTL, getCfg.doesntExistTTL) } return nil, err @@ -383,11 +384,11 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e return nil, err } - ttl := cfg.ContentTTL - time.Since(getTime) + ttl := getCfg.contentTTL - time.Since(getTime) if ttl > 0 { - cfg.cache.Store(ctx, map[string][]byte{key: data}, ttl) + cache.Store(ctx, map[string][]byte{key: data}, ttl) } - cb.storeExistsCacheEntry(ctx, existsKey, true, getTime, cfg) + storeExistsCacheEntry(ctx, existsKey, true, getTime, cache, getCfg.existsTTL, getCfg.doesntExistTTL) return ioutil.NopCloser(bytes.NewReader(data)), nil } @@ -401,28 +402,30 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length return cb.Bucket.GetRange(ctx, name, off, length) } - cfgName, cfg := cb.findCacheConfig(cb.operationConfigs[operationGetRange], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationGetRange], name) if cfg == nil { return cb.Bucket.GetRange(ctx, name, off, length) } + rangeCfg := cfg.(getRangeConfig) var ( r io.ReadCloser err error ) tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) { - r, err = cb.cachedGetRange(ctx, name, off, length, cfgName, cfg) + r, err = cb.cachedGetRange(ctx, name, off, length, cfgName, cache, rangeCfg) }) return r, err } func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { - lname, cfg := cb.findCacheConfig(cb.operationConfigs[operationObjectSize], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationObjectSize], name) if cfg == nil { return cb.Bucket.ObjectSize(ctx, name) } + osCfg := cfg.(objectSizeCfg) - return cb.cachedObjectSize(ctx, name, lname, cfg.cache, cfg.ObjectSizeTTL) + return cb.cachedObjectSize(ctx, name, cfgName, cache, time.Duration(osCfg)) } func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgName string, cache cache.Cache, ttl time.Duration) (uint64, error) { @@ -448,11 +451,11 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgN return size, nil } -func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *operationConfig) (io.ReadCloser, error) { +func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cache cache.Cache, cfg getRangeConfig) (io.ReadCloser, error) { cb.operationRequests.WithLabelValues(operationGetRange, cfgName) cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) - size, err := cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.ObjectSizeTTL) + size, err := cb.cachedObjectSize(ctx, name, cfgName, cache, cfg.objectSizeTTL) if err != nil { return nil, errors.Wrapf(err, "failed to get size of object: %s", name) } @@ -463,28 +466,28 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset } // Start and end range are subrange-aligned offsets into object, that we're going to read. 
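// Editorial example: with subrangeSize = 16000, GetRange(offset=10000,
// length=20000) reads bytes [10000, 30000). startRange rounds 10000 down to 0,
// endRange rounds 30000 up to 32000, so subranges [0, 16000) and
// [16000, 32000) are fetched and cached under separate keys.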
- startRange := (offset / cfg.SubrangeSize) * cfg.SubrangeSize - endRange := ((offset + length) / cfg.SubrangeSize) * cfg.SubrangeSize - if (offset+length)%cfg.SubrangeSize > 0 { - endRange += cfg.SubrangeSize + startRange := (offset / cfg.subrangeSize) * cfg.subrangeSize + endRange := ((offset + length) / cfg.subrangeSize) * cfg.subrangeSize + if (offset+length)%cfg.subrangeSize > 0 { + endRange += cfg.subrangeSize } // The very last subrange in the object may have length that is not divisible by subrange size. - lastSubrangeOffset := endRange - cfg.SubrangeSize - lastSubrangeLength := int(cfg.SubrangeSize) + lastSubrangeOffset := endRange - cfg.subrangeSize + lastSubrangeLength := int(cfg.subrangeSize) if uint64(endRange) > size { - lastSubrangeOffset = (int64(size) / cfg.SubrangeSize) * cfg.SubrangeSize + lastSubrangeOffset = (int64(size) / cfg.subrangeSize) * cfg.subrangeSize lastSubrangeLength = int(int64(size) - lastSubrangeOffset) } - numSubranges := (endRange - startRange) / cfg.SubrangeSize + numSubranges := (endRange - startRange) / cfg.subrangeSize offsetKeys := make(map[int64]string, numSubranges) keys := make([]string, 0, numSubranges) totalRequestedBytes := int64(0) - for off := startRange; off < endRange; off += cfg.SubrangeSize { - end := off + cfg.SubrangeSize + for off := startRange; off < endRange; off += cfg.subrangeSize { + end := off + cfg.subrangeSize if end > int64(size) { end = int64(size) } @@ -497,7 +500,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset // Try to get all subranges from the cache. totalCachedBytes := int64(0) - hits := cfg.cache.Fetch(ctx, keys) + hits := cache.Fetch(ctx, keys) for _, b := range hits { totalCachedBytes += int64(len(b)) cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) @@ -509,13 +512,13 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset hits = map[string][]byte{} } - err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cfg) + err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cache, cfg) if err != nil { return nil, err } } - return ioutil.NopCloser(newSubrangesReader(cfg.SubrangeSize, offsetKeys, hits, offset, length)), nil + return ioutil.NopCloser(newSubrangesReader(cfg.subrangeSize, offsetKeys, hits, offset, length)), nil } type rng struct { @@ -524,19 +527,19 @@ type rng struct { // fetchMissingSubranges fetches missing subranges, stores them into "hits" map // and into cache as well (using provided cacheKeys). -func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, labelName string, cfg *operationConfig) error { +func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, cfgName string, cache cache.Cache, cfg getRangeConfig) error { // Ordered list of missing sub-ranges. 
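// Editorial note: adjacent missing subranges are merged first; if more than
// maxSubRequests ranges remain, merging is retried with a doubling gap limit
// (subrangeSize, 2*subrangeSize, ...) until the count fits. Bytes that were
// already cached but get re-downloaded by the widened requests are counted in
// thanos_store_bucket_cache_getrange_refetched_bytes_total.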
var missing []rng - for off := startRange; off < endRange; off += cfg.SubrangeSize { + for off := startRange; off < endRange; off += cfg.subrangeSize { if hits[cacheKeys[off]] == nil { - missing = append(missing, rng{start: off, end: off + cfg.SubrangeSize}) + missing = append(missing, rng{start: off, end: off + cfg.subrangeSize}) } } missing = mergeRanges(missing, 0) // Merge adjacent ranges. // Keep merging until we have only max number of ranges (= requests). - for limit := cfg.SubrangeSize; cfg.MaxGetRangeRequests > 0 && len(missing) > cfg.MaxGetRangeRequests; limit = limit * 2 { + for limit := cfg.subrangeSize; cfg.maxSubRequests > 0 && len(missing) > cfg.maxSubRequests; limit = limit * 2 { missing = mergeRanges(missing, limit) } @@ -553,7 +556,7 @@ func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, } defer runutil.CloseWithLogOnErr(cb.logger, r, "fetching range [%d, %d]", m.start, m.end) - for off := m.start; off < m.end && gctx.Err() == nil; off += cfg.SubrangeSize { + for off := m.start; off < m.end && gctx.Err() == nil; off += cfg.subrangeSize { key := cacheKeys[off] if key == "" { return errors.Errorf("fetching range [%d, %d]: caching key for offset %d not found", m.start, m.end, off) @@ -566,7 +569,7 @@ func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, // if object length isn't divisible by subrange size. subrangeData = make([]byte, lastSubrangeLength) } else { - subrangeData = make([]byte, cfg.SubrangeSize) + subrangeData = make([]byte, cfg.subrangeSize) } _, err := io.ReadFull(r, subrangeData) if err != nil { @@ -582,10 +585,10 @@ func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, hitsMutex.Unlock() if storeToCache { - cb.fetchedGetRangeBytes.WithLabelValues(originBucket, labelName).Add(float64(len(subrangeData))) - cfg.cache.Store(gctx, map[string][]byte{key: subrangeData}, cfg.SubrangeTTL) + cb.fetchedGetRangeBytes.WithLabelValues(originBucket, cfgName).Add(float64(len(subrangeData))) + cache.Store(gctx, map[string][]byte{key: subrangeData}, cfg.subrangeTTL) } else { - cb.refetchedGetRangeBytes.WithLabelValues(originCache, labelName).Add(float64(len(subrangeData))) + cb.refetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(subrangeData))) } } diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index 4e1fdbb6b9..9fe370558b 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -6,6 +6,7 @@ package storecache import ( "regexp" "strings" + "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -37,6 +38,41 @@ type CachingBucketWithBackendConfig struct { CachingBucketConfig CachingBucketConfig `yaml:"caching_config"` } +type CachingBucketConfig struct { + // Basic unit used to cache chunks. + ChunkSubrangeSize int64 `yaml:"chunk_subrange_size"` + + // Maximum number of GetRange requests issued by this bucket for single GetRange call. Zero or negative value = unlimited. + MaxChunksGetRangeRequests int `yaml:"max_chunks_get_range_requests"` + + // TTLs for various cache items. + ChunkObjectSizeTTL time.Duration `yaml:"chunk_object_size_ttl"` + ChunkSubrangeTTL time.Duration `yaml:"chunk_subrange_ttl"` + + // How long to cache result of Iter call in root directory. + RootIterTTL time.Duration `yaml:"root_iter_ttl"` + + // Config for Exists and Get operations for metadata files. 
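// Editorial note: "metadata files" are the small per-block objects matched by
// isMetaFile (meta.json and the deletion mark). They are read often and change
// rarely, hence the longer exists/content TTLs, while negative answers get the
// shortest TTL so newly written files are noticed sooner.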
+ MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"` + MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"` + MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` +} + +func DefaultCachingBucketConfig() CachingBucketConfig { + return CachingBucketConfig{ + ChunkSubrangeSize: 16000, // Equal to max chunk size. + ChunkObjectSizeTTL: 24 * time.Hour, + ChunkSubrangeTTL: 24 * time.Hour, + MaxChunksGetRangeRequests: 3, + + RootIterTTL: 5 * time.Minute, + + MetafileExistsTTL: 10 * time.Minute, + MetafileDoesntExistTTL: 3 * time.Minute, + MetafileContentTTL: 1 * time.Hour, + } +} + // NewCachingBucketFromYaml uses YAML configuration to create new caching bucket. func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) { level.Info(logger).Log("msg", "loading caching bucket configuration") @@ -72,17 +108,18 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger return nil, err } + cbc := config.CachingBucketConfig // Configure cache. - cb.CacheGetRange("chunks", c, isTSDBChunkFile, config.CachingBucketConfig) - cb.CacheExists("metafile", c, isMetaFile, config.CachingBucketConfig) - cb.CacheGet("metafile", c, isMetaFile, config.CachingBucketConfig) + cb.CacheGetRange("chunks", c, isTSDBChunkFile, cbc.ChunkSubrangeSize, cbc.ChunkObjectSizeTTL, cbc.ChunkSubrangeTTL, cbc.MaxChunksGetRangeRequests) + cb.CacheExists("metafile", c, isMetaFile, cbc.MetafileExistsTTL, cbc.MetafileDoesntExistTTL) + cb.CacheGet("metafile", c, isMetaFile, cbc.MetafileContentTTL, cbc.MetafileExistsTTL, cbc.MetafileDoesntExistTTL) // Cache Iter requests for root. - cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.CachingBucketConfig) + cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, cbc.RootIterTTL) - // Enabling index caching. - cb.CacheObjectSize("index", c, isIndexFile, config.CachingBucketConfig) - cb.CacheGetRange("index", c, isIndexFile, config.CachingBucketConfig) + // Enabling index caching (example). 
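// Editorial note: unlike the chunk and metafile settings above, these index
// parameters (1h object-size TTL, 32000-byte subranges kept for 24h, at most
// 3 sub-requests) are hard-coded rather than read from YAML, consistent with
// the "(example)" label.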
+ cb.CacheObjectSize("index", c, isIndexFile, time.Hour) + cb.CacheGetRange("index", c, isIndexFile, 32000, time.Hour, 24*time.Hour, 3) return cb, nil } diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 249df573e2..e42a52e85d 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -222,13 +222,9 @@ func TestChunksCaching(t *testing.T) { tc.init() } - cfg := DefaultCachingBucketConfig() - cfg.SubrangeSize = subrangeSize - cfg.MaxGetRangeRequests = tc.maxGetRangeRequests - cachingBucket, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) - cachingBucket.CacheGetRange("chunks", cache, isTSDBChunkFile, cfg) + cachingBucket.CacheGetRange("chunks", cache, isTSDBChunkFile, subrangeSize, time.Hour, time.Hour, tc.maxGetRangeRequests) verifyGetRange(t, cachingBucket, name, tc.offset, tc.length, tc.expectedLength) testutil.Equals(t, tc.expectedCachedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedGetRangeBytes.WithLabelValues(originCache, "chunks")))) @@ -339,7 +335,7 @@ func TestInvalidOffsetAndLength(t *testing.T) { b := &testBucket{objstore.NewInMemBucket()} c, err := NewCachingBucket(b, nil, nil) testutil.Ok(t, err) - c.CacheGetRange("chunks", newMockCache(), func(string) bool { return true }, DefaultCachingBucketConfig()) + c.CacheGetRange("chunks", newMockCache(), func(string) bool { return true }, 10000, time.Hour, time.Hour, 3) r, err := c.GetRange(context.Background(), "test", -1, 1000) testutil.Equals(t, nil, r) @@ -378,12 +374,10 @@ func TestCachedIter(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - config := DefaultCachingBucketConfig() - cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) const cfgName = "dirs" - cb.CacheIter(cfgName, cache, func(string) bool { return true }, config) + cb.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute) verifyIter(t, cb, allFiles, false, cfgName) @@ -441,13 +435,11 @@ func TestExists(t *testing.T) { // We reuse cache between tests (!) 
cache := newMockCache() - config := DefaultCachingBucketConfig() - cb, err := NewCachingBucket(inmem, nil, nil) testutil.Ok(t, err) const cfgName = "test" - cb.CacheExists(cfgName, cache, matchAll, config) + cb.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) verifyExists(t, cb, testFilename, false, false, cfgName) @@ -472,7 +464,7 @@ func TestExistsCachingDisabled(t *testing.T) { testutil.Ok(t, err) const cfgName = "test" - cb.CacheExists(cfgName, cache, func(string) bool { return false }, DefaultCachingBucketConfig()) + cb.CacheExists(cfgName, cache, func(string) bool { return false }, 10*time.Minute, 2*time.Minute) verifyExists(t, cb, testFilename, false, false, cfgName) @@ -508,8 +500,8 @@ func TestGet(t *testing.T) { testutil.Ok(t, err) const cfgName = "metafile" - cb.CacheGet(cfgName, cache, matchAll, DefaultCachingBucketConfig()) - cb.CacheExists(cfgName, cache, matchAll, DefaultCachingBucketConfig()) + cb.CacheGet(cfgName, cache, matchAll, 10*time.Minute, 10*time.Minute, 2*time.Minute) + cb.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) verifyGet(t, cb, testFilename, nil, false, cfgName) verifyExists(t, cb, testFilename, false, true, cfgName) @@ -567,7 +559,7 @@ func TestObjectSize(t *testing.T) { testutil.Ok(t, err) const cfgName = "test" - cb.CacheObjectSize(cfgName, cache, matchAll, DefaultCachingBucketConfig()) + cb.CacheObjectSize(cfgName, cache, matchAll, time.Minute) verifyObjectSize(t, cb, testFilename, -1, false, cfgName) verifyObjectSize(t, cb, testFilename, -1, false, cfgName) // ObjectSize doesn't cache non-existant files. From ed2ece9db3273361dc080ef912813d91caa68fc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Sat, 9 May 2020 13:57:43 +0200 Subject: [PATCH 11/25] Fix typo, make lint happy. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index e42a52e85d..8c641694e5 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -562,7 +562,7 @@ func TestObjectSize(t *testing.T) { cb.CacheObjectSize(cfgName, cache, matchAll, time.Minute) verifyObjectSize(t, cb, testFilename, -1, false, cfgName) - verifyObjectSize(t, cb, testFilename, -1, false, cfgName) // ObjectSize doesn't cache non-existant files. + verifyObjectSize(t, cb, testFilename, -1, false, cfgName) // ObjectSize doesn't cache non-existent files. data := []byte("hello world") testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data))) From 73ee7620dc5ebed673cb143000bd0952b11724ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 16:04:16 +0200 Subject: [PATCH 12/25] Simplify constants. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 62 +++++++++++++------------- pkg/store/cache/caching_bucket_test.go | 18 ++++---- 2 files changed, 39 insertions(+), 41 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 0c22df5010..411f4f54a6 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -29,19 +29,17 @@ import ( ) const ( - config = "config" - originCache = "cache" originBucket = "bucket" existsTrue = "true" existsFalse = "false" - operationGet = "get" - operationGetRange = "getrange" - operationIter = "iter" - operationExists = "exists" - operationObjectSize = "objectsize" + opGet = "get" + opGetRange = "getrange" + opIter = "iter" + opExists = "exists" + opObjectSize = "objectsize" ) var errObjNotFound = errors.Errorf("object not found") @@ -105,15 +103,15 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis requestedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_requested_bytes_total", Help: "Total number of bytes requested via GetRange.", - }, []string{config}), + }, []string{"config"}), fetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_fetched_bytes_total", Help: "Total number of bytes fetched because of GetRange operation. Data from bucket is then stored to cache.", - }, []string{"origin", config}), + }, []string{"origin", "config"}), refetchedGetRangeBytes: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_getrange_refetched_bytes_total", Help: "Total number of bytes re-fetched from storage because of GetRange operation, despite being in cache already.", - }, []string{"origin", config}), + }, []string{"origin", "config"}), operationRequests: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_bucket_cache_operation_requests_total", @@ -155,17 +153,17 @@ func (cb *CachingBucket) addOperationConfig(operationName, configName string, ma // CacheIter configures caching of "Iter" operation for matching directories. func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration) { - cb.addOperationConfig(operationIter, configName, matcher, cache, iterConfig(ttl)) + cb.addOperationConfig(opIter, configName, matcher, cache, iterConfig(ttl)) } // CacheExists configures caching of "Exists" operation for matching files. Negative values are cached as well. func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, existsTTL, doesntExistTTL time.Duration) { - cb.addOperationConfig(operationExists, configName, matcher, cache, existsConfig{existsTTL, doesntExistTTL}) + cb.addOperationConfig(opExists, configName, matcher, cache, existsConfig{existsTTL, doesntExistTTL}) } // CacheGet configures caching of "Get" operation for matching files. Content of the object is cached, as well as whether object exists or not. 
func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, contentTTL, existsTTL, doesntExistTTL time.Duration) { - cb.addOperationConfig(operationGet, configName, matcher, cache, getConfig{contentTTL: contentTTL, existsTTL: existsTTL, doesntExistTTL: doesntExistTTL}) + cb.addOperationConfig(opGet, configName, matcher, cache, getConfig{contentTTL: contentTTL, existsTTL: existsTTL, doesntExistTTL: doesntExistTTL}) } // CacheGetRange configures caching of "GetRange" operation. Subranges (aligned on subrange size) are cached individually. @@ -174,7 +172,7 @@ func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher // MaxSubRequests specifies how many such subrequests may be issued. Values <= 0 mean there is no limit (requests // for adjacent missing subranges are still merged). func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, subrangeSize int64, objectSizeTTL, subrangeTTL time.Duration, maxSubRequests int) { - cb.addOperationConfig(operationGetRange, configName, matcher, cache, getRangeConfig{ + cb.addOperationConfig(opGetRange, configName, matcher, cache, getRangeConfig{ subrangeSize: subrangeSize, objectSizeTTL: objectSizeTTL, subrangeTTL: subrangeTTL, @@ -189,7 +187,7 @@ func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, mat // CacheObjectSize configures caching of "ObjectSize" operation for matching files. func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, ttl time.Duration) { - cb.addOperationConfig(operationObjectSize, configName, matcher, cache, objectSizeCfg(ttl)) + cb.addOperationConfig(opObjectSize, configName, matcher, cache, objectSizeCfg(ttl)) } func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) (configName string, cache cache.Cache, cfg interface{}) { @@ -222,13 +220,13 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur } func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationIter], dir) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opIter], dir) if cfg == nil { return cb.Bucket.Iter(ctx, dir, f) } iterCfg := cfg.(iterConfig) - cb.operationRequests.WithLabelValues(operationIter, cfgName).Inc() + cb.operationRequests.WithLabelValues(opIter, cfgName).Inc() key := cachingKeyIter(dir) @@ -236,7 +234,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er if data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { - cb.operationHits.WithLabelValues(operationIter, cfgName).Inc() + cb.operationHits.WithLabelValues(opIter, cfgName).Inc() for _, n := range list { err = f(n) @@ -292,13 +290,13 @@ func decodeIterResult(data []byte) ([]string, error) { } func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationExists], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opExists], name) if cfg == nil { return cb.Bucket.Exists(ctx, name) } existsCfg := cfg.(existsConfig) - cb.operationRequests.WithLabelValues(operationExists, cfgName).Inc() + cb.operationRequests.WithLabelValues(opExists, cfgName).Inc() key := cachingKeyExists(name) hits := cache.Fetch(ctx, []string{key}) @@ -306,10 +304,10 @@ func (cb *CachingBucket) Exists(ctx 
context.Context, name string) (bool, error) if ex := hits[key]; ex != nil { switch string(ex) { case existsTrue: - cb.operationHits.WithLabelValues(operationExists, cfgName).Inc() + cb.operationHits.WithLabelValues(opExists, cfgName).Inc() return true, nil case existsFalse: - cb.operationHits.WithLabelValues(operationExists, cfgName).Inc() + cb.operationHits.WithLabelValues(opExists, cfgName).Inc() return false, nil default: level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) @@ -344,26 +342,26 @@ func storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, } func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationGet], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opGet], name) if cfg == nil { return cb.Bucket.Get(ctx, name) } getCfg := cfg.(getConfig) - cb.operationRequests.WithLabelValues(operationGet, cfgName).Inc() + cb.operationRequests.WithLabelValues(opGet, cfgName).Inc() key := cachingKeyContent(name) existsKey := cachingKeyExists(name) hits := cache.Fetch(ctx, []string{key, existsKey}) if hits[key] != nil { - cb.operationHits.WithLabelValues(operationGet, cfgName).Inc() + cb.operationHits.WithLabelValues(opGet, cfgName).Inc() return ioutil.NopCloser(bytes.NewReader(hits[key])), nil } // If we know that file doesn't exist, we can return that. Useful for deletion marks. if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse { - cb.operationHits.WithLabelValues(operationGet, cfgName).Inc() + cb.operationHits.WithLabelValues(opGet, cfgName).Inc() return nil, errObjNotFound } @@ -402,7 +400,7 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length return cb.Bucket.GetRange(ctx, name, off, length) } - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationGetRange], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opGetRange], name) if cfg == nil { return cb.Bucket.GetRange(ctx, name, off, length) } @@ -419,7 +417,7 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length } func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[operationObjectSize], name) + cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opObjectSize], name) if cfg == nil { return cb.Bucket.ObjectSize(ctx, name) } @@ -431,11 +429,11 @@ func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, e func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgName string, cache cache.Cache, ttl time.Duration) (uint64, error) { key := cachingKeyObjectSize(name) - cb.operationRequests.WithLabelValues(operationObjectSize, cfgName).Inc() + cb.operationRequests.WithLabelValues(opObjectSize, cfgName).Inc() hits := cache.Fetch(ctx, []string{key}) if s := hits[key]; len(s) == 8 { - cb.operationHits.WithLabelValues(operationObjectSize, cfgName).Inc() + cb.operationHits.WithLabelValues(opObjectSize, cfgName).Inc() return binary.BigEndian.Uint64(s), nil } @@ -452,7 +450,7 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgN } func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cache cache.Cache, cfg getRangeConfig) (io.ReadCloser, error) { - cb.operationRequests.WithLabelValues(operationGetRange, cfgName) + 
cb.operationRequests.WithLabelValues(opGetRange, cfgName) cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) size, err := cb.cachedObjectSize(ctx, name, cfgName, cache, cfg.objectSizeTTL) @@ -505,7 +503,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset totalCachedBytes += int64(len(b)) cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) } - cb.operationHits.WithLabelValues(operationGetRange, cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) + cb.operationHits.WithLabelValues(opGetRange, cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) if len(hits) < len(keys) { if hits == nil { diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index 8c641694e5..fa0f07b647 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -402,12 +402,12 @@ func TestCachedIter(t *testing.T) { } func verifyIter(t *testing.T, cb *CachingBucket, expectedFiles []string, expectedCache bool, cfgName string) { - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationIter, cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opIter, cfgName))) col := iterCollector{} testutil.Ok(t, cb.Iter(context.Background(), "/", col.collect)) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationIter, cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opIter, cfgName))) sort.Strings(col.items) testutil.Equals(t, expectedFiles, col.items) @@ -477,11 +477,11 @@ func TestExistsCachingDisabled(t *testing.T) { func verifyExists(t *testing.T, cb *CachingBucket, file string, exists bool, fromCache bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationExists, cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opExists, cfgName))) ok, err := cb.Exists(context.Background(), file) testutil.Ok(t, err) testutil.Equals(t, exists, ok) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationExists, cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opExists, cfgName))) if fromCache { testutil.Equals(t, 1, hitsAfter-hitsBefore) @@ -521,13 +521,13 @@ func TestGet(t *testing.T) { } func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, cfgName string) { - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opGet, cfgName))) r, err := cb.Get(context.Background(), file) if expectedData == nil { testutil.Assert(t, cb.IsObjNotFoundErr(err)) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opGet, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -540,7 +540,7 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte testutil.Ok(t, err) testutil.Equals(t, expectedData, data) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationGet, cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opGet, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { @@ -573,7 +573,7 @@ func TestObjectSize(t 
*testing.T) { func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLength int, cacheUsed bool, cfgName string) { t.Helper() - hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationObjectSize, cfgName))) + hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opObjectSize, cfgName))) length, err := cb.ObjectSize(context.Background(), file) if expectedLength < 0 { @@ -582,7 +582,7 @@ func verifyObjectSize(t *testing.T, cb *CachingBucket, file string, expectedLeng testutil.Ok(t, err) testutil.Equals(t, uint64(expectedLength), length) - hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(operationObjectSize, cfgName))) + hitsAfter := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opObjectSize, cfgName))) if cacheUsed { testutil.Equals(t, 1, hitsAfter-hitsBefore) } else { From 871dd78f6831956ac6e8d51515a0fdbddc94fdf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 16:11:01 +0200 Subject: [PATCH 13/25] Simplify caching configuration. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket_factory.go | 43 +++++++++-------------- 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index 9fe370558b..39b59eec0a 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -30,15 +30,11 @@ const ( deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename ) -// CachingBucketWithBackendConfig is a configuration of caching bucket used by Store component. -type CachingBucketWithBackendConfig struct { +// CachingWithBackendConfig is a configuration of caching bucket used by Store component. +type CachingWithBackendConfig struct { Type BucketCacheProvider `yaml:"backend"` BackendConfig interface{} `yaml:"backend_config"` - CachingBucketConfig CachingBucketConfig `yaml:"caching_config"` -} - -type CachingBucketConfig struct { // Basic unit used to cache chunks. ChunkSubrangeSize int64 `yaml:"chunk_subrange_size"` @@ -58,27 +54,23 @@ type CachingBucketConfig struct { MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` } -func DefaultCachingBucketConfig() CachingBucketConfig { - return CachingBucketConfig{ - ChunkSubrangeSize: 16000, // Equal to max chunk size. - ChunkObjectSizeTTL: 24 * time.Hour, - ChunkSubrangeTTL: 24 * time.Hour, - MaxChunksGetRangeRequests: 3, - - RootIterTTL: 5 * time.Minute, - - MetafileExistsTTL: 10 * time.Minute, - MetafileDoesntExistTTL: 3 * time.Minute, - MetafileContentTTL: 1 * time.Hour, - } +func (cfg *CachingWithBackendConfig) Defaults() { + cfg.ChunkSubrangeSize = 16000 // Equal to max chunk size. + cfg.ChunkObjectSizeTTL = 24 * time.Hour + cfg.ChunkSubrangeTTL = 24 * time.Hour + cfg.MaxChunksGetRangeRequests = 3 + cfg.RootIterTTL = 5 * time.Minute + cfg.MetafileExistsTTL = 10 * time.Minute + cfg.MetafileDoesntExistTTL = 3 * time.Minute + cfg.MetafileContentTTL = 1 * time.Hour } // NewCachingBucketFromYaml uses YAML configuration to create new caching bucket. 
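// Editorial note: an illustrative YAML shape for this factory. Keys follow the
// yaml tags of CachingWithBackendConfig; the backend and backend_config values
// are placeholders, the remaining values mirror Defaults():
//
//   backend: memcached
//   backend_config:
//     addresses: ["localhost:11211"]
//   chunk_subrange_size: 16000
//   max_chunks_get_range_requests: 3
//   chunk_object_size_ttl: 24h
//   chunk_subrange_ttl: 24h
//   root_iter_ttl: 5m
//   metafile_exists_ttl: 10m
//   metafile_doesnt_exist_ttl: 3m
//   metafile_content_ttl: 1h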
func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) { level.Info(logger).Log("msg", "loading caching bucket configuration") - config := &CachingBucketWithBackendConfig{} - config.CachingBucketConfig = DefaultCachingBucketConfig() + config := &CachingWithBackendConfig{} + config.Defaults() if err := yaml.UnmarshalStrict(yamlContent, config); err != nil { return nil, errors.Wrap(err, "parsing config YAML file") @@ -108,14 +100,13 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger return nil, err } - cbc := config.CachingBucketConfig // Configure cache. - cb.CacheGetRange("chunks", c, isTSDBChunkFile, cbc.ChunkSubrangeSize, cbc.ChunkObjectSizeTTL, cbc.ChunkSubrangeTTL, cbc.MaxChunksGetRangeRequests) - cb.CacheExists("metafile", c, isMetaFile, cbc.MetafileExistsTTL, cbc.MetafileDoesntExistTTL) - cb.CacheGet("metafile", c, isMetaFile, cbc.MetafileContentTTL, cbc.MetafileExistsTTL, cbc.MetafileDoesntExistTTL) + cb.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectSizeTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests) + cb.CacheExists("metafile", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) + cb.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. - cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, cbc.RootIterTTL) + cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.RootIterTTL) // Enabling index caching (example). cb.CacheObjectSize("index", c, isIndexFile, time.Hour) From ccd39ac1445b01d10571ec47782aa3777e06bfb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 17:00:47 +0200 Subject: [PATCH 14/25] Refactor cache configuration. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Configuration is now passed to the cache when created. Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 171 +++++-------------- pkg/store/cache/caching_bucket_config.go | 195 ++++++++++++++++++++++ pkg/store/cache/caching_bucket_factory.go | 22 +-- pkg/store/cache/caching_bucket_test.go | 59 ++++--- 4 files changed, 282 insertions(+), 165 deletions(-) create mode 100644 pkg/store/cache/caching_bucket_config.go diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 411f4f54a6..6bbc4da08e 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -44,10 +44,11 @@ const ( var errObjNotFound = errors.Errorf("object not found") -// Bucket implementation that provides some caching features, using knowledge about how Thanos accesses data. +// Bucket implementation that provides some caching features, based on passed configuration. type CachingBucket struct { objstore.Bucket + cfg *CachingBucketConfig logger log.Logger requestedGetRangeBytes *prometheus.CounterVec @@ -59,43 +60,15 @@ type CachingBucket struct { operationHits *prometheus.CounterVec } -// Generic config for single operation. -type operationConfig struct { - configName string - matcher func(name string) bool - cache cache.Cache - opConfig interface{} -} - -// Operation-specific configs. 
-type iterConfig time.Duration - -type existsConfig struct { - existsTTL, doesntExistTTL time.Duration -} - -type getConfig struct { - contentTTL, existsTTL, doesntExistTTL time.Duration -} - -type getRangeConfig struct { - subrangeSize int64 - objectSizeTTL, subrangeTTL time.Duration - maxSubRequests int -} - -type objectSizeCfg time.Duration - -// NewCachingBucket creates caching bucket with no configuration. Various "Cache*" methods configure -// this bucket to cache results of individual bucket methods. Configuration must be set before -// caching bucket is used by other objects. -func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) { +// NewCachingBucket creates new caching bucket with provided configuration. +func NewCachingBucket(b objstore.Bucket, cfg *CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) { if b == nil { return nil, errors.New("bucket is nil") } cb := &CachingBucket{ Bucket: b, + cfg: cfg, logger: logger, operationConfigs: map[string][]*operationConfig{}, @@ -123,80 +96,21 @@ func NewCachingBucket(b objstore.Bucket, logger log.Logger, reg prometheus.Regis }, []string{"operation", "config"}), } - return cb, nil -} - -func (cb *CachingBucket) addOperationConfig(operationName, configName string, matcher func(string) bool, cache cache.Cache, opConfig interface{}) { - if matcher == nil { - panic("matcher") - } - if cache == nil { - panic("cache") - } - if configName == "" { - panic("empty configName") - } - if opConfig == nil { - panic("nil opConfig") - } - - cb.operationConfigs[operationName] = append(cb.operationConfigs[operationName], &operationConfig{ - configName: configName, - matcher: matcher, - cache: cache, - opConfig: opConfig, - }) + for op, names := range cfg.allConfigNames() { + for _, n := range names { + cb.operationRequests.WithLabelValues(op, n) + cb.operationHits.WithLabelValues(op, n) - cb.operationRequests.WithLabelValues(operationName, configName) - cb.operationHits.WithLabelValues(operationName, configName) -} - -// CacheIter configures caching of "Iter" operation for matching directories. -func (cb *CachingBucket) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration) { - cb.addOperationConfig(opIter, configName, matcher, cache, iterConfig(ttl)) -} - -// CacheExists configures caching of "Exists" operation for matching files. Negative values are cached as well. -func (cb *CachingBucket) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, existsTTL, doesntExistTTL time.Duration) { - cb.addOperationConfig(opExists, configName, matcher, cache, existsConfig{existsTTL, doesntExistTTL}) -} - -// CacheGet configures caching of "Get" operation for matching files. Content of the object is cached, as well as whether object exists or not. -func (cb *CachingBucket) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, contentTTL, existsTTL, doesntExistTTL time.Duration) { - cb.addOperationConfig(opGet, configName, matcher, cache, getConfig{contentTTL: contentTTL, existsTTL: existsTTL, doesntExistTTL: doesntExistTTL}) -} - -// CacheGetRange configures caching of "GetRange" operation. Subranges (aligned on subrange size) are cached individually. -// Since caching operation needs to know the object size to compute correct subranges, object size is cached as well. -// Single "GetRange" requests can result in multiple smaller GetRange sub-requests issued on the underlying bucket. 
-// MaxSubRequests specifies how many such subrequests may be issued. Values <= 0 mean there is no limit (requests -// for adjacent missing subranges are still merged). -func (cb *CachingBucket) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, subrangeSize int64, objectSizeTTL, subrangeTTL time.Duration, maxSubRequests int) { - cb.addOperationConfig(opGetRange, configName, matcher, cache, getRangeConfig{ - subrangeSize: subrangeSize, - objectSizeTTL: objectSizeTTL, - subrangeTTL: subrangeTTL, - maxSubRequests: maxSubRequests, - }) - - cb.requestedGetRangeBytes.WithLabelValues(configName) - cb.fetchedGetRangeBytes.WithLabelValues(originCache, configName) - cb.fetchedGetRangeBytes.WithLabelValues(originBucket, configName) - cb.refetchedGetRangeBytes.WithLabelValues(originCache, configName) -} - -// CacheObjectSize configures caching of "ObjectSize" operation for matching files. -func (cb *CachingBucket) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, ttl time.Duration) { - cb.addOperationConfig(opObjectSize, configName, matcher, cache, objectSizeCfg(ttl)) -} - -func (cb *CachingBucket) findCacheConfig(configs []*operationConfig, objectName string) (configName string, cache cache.Cache, cfg interface{}) { - for _, cfg := range configs { - if cfg.matcher(objectName) { - return cfg.configName, cfg.cache, cfg.opConfig + if op == opGetRange { + cb.requestedGetRangeBytes.WithLabelValues(n) + cb.fetchedGetRangeBytes.WithLabelValues(originCache, n) + cb.fetchedGetRangeBytes.WithLabelValues(originBucket, n) + cb.refetchedGetRangeBytes.WithLabelValues(originCache, n) + } } } - return "", nil, nil + + return cb, nil } func (cb *CachingBucket) Name() string { @@ -220,17 +134,16 @@ func (cb *CachingBucket) ReaderWithExpectedErrs(expectedFunc objstore.IsOpFailur } func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) error) error { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opIter], dir) + cfgName, cfg := cb.cfg.findIterConfig(dir) if cfg == nil { return cb.Bucket.Iter(ctx, dir, f) } - iterCfg := cfg.(iterConfig) cb.operationRequests.WithLabelValues(opIter, cfgName).Inc() key := cachingKeyIter(dir) - data := cache.Fetch(ctx, []string{key}) + data := cfg.cache.Fetch(ctx, []string{key}) if data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { @@ -258,11 +171,11 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er return f(s) }) - remainingTTL := time.Duration(iterCfg) - time.Since(iterTime) + remainingTTL := cfg.ttl - time.Since(iterTime) if err == nil && remainingTTL > 0 { data := encodeIterResult(list) if data != nil { - cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) + cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) } } return err @@ -290,16 +203,15 @@ func decodeIterResult(data []byte) ([]string, error) { } func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opExists], name) + cfgName, cfg := cb.cfg.findExistConfig(name) if cfg == nil { return cb.Bucket.Exists(ctx, name) } - existsCfg := cfg.(existsConfig) cb.operationRequests.WithLabelValues(opExists, cfgName).Inc() key := cachingKeyExists(name) - hits := cache.Fetch(ctx, []string{key}) + hits := cfg.cache.Fetch(ctx, []string{key}) if ex := hits[key]; ex != nil { switch string(ex) { @@ -317,7 +229,7 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) 
(bool, error) existsTime := time.Now() ok, err := cb.Bucket.Exists(ctx, name) if err == nil { - storeExistsCacheEntry(ctx, key, ok, existsTime, cache, existsCfg.existsTTL, existsCfg.doesntExistTTL) + storeExistsCacheEntry(ctx, key, ok, existsTime, cfg.cache, cfg.existsTTL, cfg.doesntExistTTL) } return ok, err @@ -342,18 +254,17 @@ func storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, } func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opGet], name) + cfgName, cfg := cb.cfg.findGetConfig(name) if cfg == nil { return cb.Bucket.Get(ctx, name) } - getCfg := cfg.(getConfig) cb.operationRequests.WithLabelValues(opGet, cfgName).Inc() key := cachingKeyContent(name) existsKey := cachingKeyExists(name) - hits := cache.Fetch(ctx, []string{key, existsKey}) + hits := cfg.cache.Fetch(ctx, []string{key, existsKey}) if hits[key] != nil { cb.operationHits.WithLabelValues(opGet, cfgName).Inc() return ioutil.NopCloser(bytes.NewReader(hits[key])), nil @@ -370,7 +281,7 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e if err != nil { if cb.Bucket.IsObjNotFoundErr(err) { // Cache that object doesn't exist. - storeExistsCacheEntry(ctx, existsKey, false, getTime, cache, getCfg.existsTTL, getCfg.doesntExistTTL) + storeExistsCacheEntry(ctx, existsKey, false, getTime, cfg.cache, cfg.existsTTL, cfg.doesntExistTTL) } return nil, err @@ -382,11 +293,11 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e return nil, err } - ttl := getCfg.contentTTL - time.Since(getTime) + ttl := cfg.contentTTL - time.Since(getTime) if ttl > 0 { - cache.Store(ctx, map[string][]byte{key: data}, ttl) + cfg.cache.Store(ctx, map[string][]byte{key: data}, ttl) } - storeExistsCacheEntry(ctx, existsKey, true, getTime, cache, getCfg.existsTTL, getCfg.doesntExistTTL) + storeExistsCacheEntry(ctx, existsKey, true, getTime, cfg.cache, cfg.existsTTL, cfg.doesntExistTTL) return ioutil.NopCloser(bytes.NewReader(data)), nil } @@ -400,30 +311,28 @@ func (cb *CachingBucket) GetRange(ctx context.Context, name string, off, length return cb.Bucket.GetRange(ctx, name, off, length) } - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opGetRange], name) + cfgName, cfg := cb.cfg.findGetRangeConfig(name) if cfg == nil { return cb.Bucket.GetRange(ctx, name, off, length) } - rangeCfg := cfg.(getRangeConfig) var ( r io.ReadCloser err error ) tracing.DoInSpan(ctx, "cachingbucket_getrange", func(ctx context.Context) { - r, err = cb.cachedGetRange(ctx, name, off, length, cfgName, cache, rangeCfg) + r, err = cb.cachedGetRange(ctx, name, off, length, cfgName, cfg) }) return r, err } func (cb *CachingBucket) ObjectSize(ctx context.Context, name string) (uint64, error) { - cfgName, cache, cfg := cb.findCacheConfig(cb.operationConfigs[opObjectSize], name) + cfgName, cfg := cb.cfg.findObjectSizeConfig(name) if cfg == nil { return cb.Bucket.ObjectSize(ctx, name) } - osCfg := cfg.(objectSizeCfg) - return cb.cachedObjectSize(ctx, name, cfgName, cache, time.Duration(osCfg)) + return cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.ttl) } func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgName string, cache cache.Cache, ttl time.Duration) (uint64, error) { @@ -449,11 +358,11 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgN return size, nil } -func (cb *CachingBucket) cachedGetRange(ctx context.Context, name 
string, offset, length int64, cfgName string, cache cache.Cache, cfg getRangeConfig) (io.ReadCloser, error) { +func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *getRangeConfig) (io.ReadCloser, error) { cb.operationRequests.WithLabelValues(opGetRange, cfgName) cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) - size, err := cb.cachedObjectSize(ctx, name, cfgName, cache, cfg.objectSizeTTL) + size, err := cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.objectSizeTTL) if err != nil { return nil, errors.Wrapf(err, "failed to get size of object: %s", name) } @@ -498,7 +407,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset // Try to get all subranges from the cache. totalCachedBytes := int64(0) - hits := cache.Fetch(ctx, keys) + hits := cfg.cache.Fetch(ctx, keys) for _, b := range hits { totalCachedBytes += int64(len(b)) cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) @@ -510,7 +419,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset hits = map[string][]byte{} } - err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cache, cfg) + err := cb.fetchMissingSubranges(ctx, name, startRange, endRange, offsetKeys, hits, lastSubrangeOffset, lastSubrangeLength, cfgName, cfg) if err != nil { return nil, err } @@ -525,7 +434,7 @@ type rng struct { // fetchMissingSubranges fetches missing subranges, stores them into "hits" map // and into cache as well (using provided cacheKeys). -func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, cfgName string, cache cache.Cache, cfg getRangeConfig) error { +func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, startRange, endRange int64, cacheKeys map[int64]string, hits map[string][]byte, lastSubrangeOffset int64, lastSubrangeLength int, cfgName string, cfg *getRangeConfig) error { // Ordered list of missing sub-ranges. var missing []rng @@ -584,7 +493,7 @@ func (cb *CachingBucket) fetchMissingSubranges(ctx context.Context, name string, if storeToCache { cb.fetchedGetRangeBytes.WithLabelValues(originBucket, cfgName).Add(float64(len(subrangeData))) - cache.Store(gctx, map[string][]byte{key: subrangeData}, cfg.subrangeTTL) + cfg.cache.Store(gctx, map[string][]byte{key: subrangeData}, cfg.subrangeTTL) } else { cb.refetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(subrangeData))) } diff --git a/pkg/store/cache/caching_bucket_config.go b/pkg/store/cache/caching_bucket_config.go new file mode 100644 index 0000000000..9696ff6749 --- /dev/null +++ b/pkg/store/cache/caching_bucket_config.go @@ -0,0 +1,195 @@ +package storecache + +import ( + "time" + + "github.com/thanos-io/thanos/pkg/cache" +) + +// CachingBucketConfig contains low-level configuration for individual bucket operations. +// This is not exposed to the user, but it is expected that code sets up individual +// operations based on user-provided configuration. 
+type CachingBucketConfig struct { + get map[string]*getConfig + iter map[string]*iterConfig + exists map[string]*existsConfig + getRange map[string]*getRangeConfig + objectSize map[string]*objectSizeConfig +} + +func NewCachingBucketConfig() *CachingBucketConfig { + return &CachingBucketConfig{ + get: map[string]*getConfig{}, + iter: map[string]*iterConfig{}, + exists: map[string]*existsConfig{}, + getRange: map[string]*getRangeConfig{}, + objectSize: map[string]*objectSizeConfig{}, + } +} + +// Generic config for single operation. +type operationConfig struct { + matcher func(name string) bool + cache cache.Cache +} + +// Operation-specific configs. +type iterConfig struct { + operationConfig + ttl time.Duration +} + +type existsConfig struct { + operationConfig + existsTTL time.Duration + doesntExistTTL time.Duration +} + +type getConfig struct { + existsConfig + contentTTL time.Duration +} + +type getRangeConfig struct { + operationConfig + subrangeSize int64 + maxSubRequests int + objectSizeTTL time.Duration + subrangeTTL time.Duration +} + +type objectSizeConfig struct { + operationConfig + ttl time.Duration +} + +func newOperationConfig(cache cache.Cache, matcher func(string) bool) operationConfig { + if cache == nil { + panic("cache") + } + if matcher == nil { + panic("matcher") + } + + return operationConfig{ + matcher: matcher, + cache: cache, + } +} + +// CacheIter configures caching of "Iter" operation for matching directories. +func (cfg *CachingBucketConfig) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration) { + cfg.iter[configName] = &iterConfig{ + operationConfig: newOperationConfig(cache, matcher), + ttl: ttl, + } +} + +// CacheGet configures caching of "Get" operation for matching files. Content of the object is cached, as well as whether object exists or not. +func (cfg *CachingBucketConfig) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, contentTTL, existsTTL, doesntExistTTL time.Duration) { + cfg.get[configName] = &getConfig{ + existsConfig: existsConfig{ + operationConfig: newOperationConfig(cache, matcher), + existsTTL: existsTTL, + doesntExistTTL: doesntExistTTL, + }, + contentTTL: contentTTL, + } +} + +// CacheExists configures caching of "Exists" operation for matching files. Negative values are cached as well. +func (cfg *CachingBucketConfig) CacheExists(configName string, cache cache.Cache, matcher func(string) bool, existsTTL, doesntExistTTL time.Duration) { + cfg.exists[configName] = &existsConfig{ + operationConfig: newOperationConfig(cache, matcher), + existsTTL: existsTTL, + doesntExistTTL: doesntExistTTL, + } +} + +// CacheGetRange configures caching of "GetRange" operation. Subranges (aligned on subrange size) are cached individually. +// Since caching operation needs to know the object size to compute correct subranges, object size is cached as well. +// Single "GetRange" requests can result in multiple smaller GetRange sub-requests issued on the underlying bucket. +// MaxSubRequests specifies how many such subrequests may be issued. Values <= 0 mean there is no limit (requests +// for adjacent missing subranges are still merged). 
+func (cfg *CachingBucketConfig) CacheGetRange(configName string, cache cache.Cache, matcher func(string) bool, subrangeSize int64, objectSizeTTL, subrangeTTL time.Duration, maxSubRequests int) { + cfg.getRange[configName] = &getRangeConfig{ + operationConfig: newOperationConfig(cache, matcher), + subrangeSize: subrangeSize, + objectSizeTTL: objectSizeTTL, + subrangeTTL: subrangeTTL, + maxSubRequests: maxSubRequests, + } +} + +// CacheObjectSize configures caching of "ObjectSize" operation for matching files. +func (cfg *CachingBucketConfig) CacheObjectSize(configName string, cache cache.Cache, matcher func(name string) bool, ttl time.Duration) { + cfg.objectSize[configName] = &objectSizeConfig{ + operationConfig: newOperationConfig(cache, matcher), + ttl: ttl, + } +} + +func (cfg *CachingBucketConfig) allConfigNames() map[string][]string { + result := map[string][]string{} + for n := range cfg.get { + result[opGet] = append(result[opGet], n) + } + for n := range cfg.iter { + result[opIter] = append(result[opIter], n) + } + for n := range cfg.exists { + result[opExists] = append(result[opExists], n) + } + for n := range cfg.getRange { + result[opGetRange] = append(result[opGetRange], n) + } + for n := range cfg.objectSize { + result[opObjectSize] = append(result[opObjectSize], n) + } + return result +} + +func (cfg *CachingBucketConfig) findIterConfig(dir string) (string, *iterConfig) { + for n, cfg := range cfg.iter { + if cfg.matcher(dir) { + return n, cfg + } + } + return "", nil +} + +func (cfg *CachingBucketConfig) findExistConfig(name string) (string, *existsConfig) { + for n, cfg := range cfg.exists { + if cfg.matcher(name) { + return n, cfg + } + } + return "", nil +} + +func (cfg *CachingBucketConfig) findGetConfig(name string) (string, *getConfig) { + for n, cfg := range cfg.get { + if cfg.matcher(name) { + return n, cfg + } + } + return "", nil +} + +func (cfg *CachingBucketConfig) findGetRangeConfig(name string) (string, *getRangeConfig) { + for n, cfg := range cfg.getRange { + if cfg.matcher(name) { + return n, cfg + } + } + return "", nil +} + +func (cfg *CachingBucketConfig) findObjectSizeConfig(name string) (string, *objectSizeConfig) { + for n, cfg := range cfg.objectSize { + if cfg.matcher(name) { + return n, cfg + } + } + return "", nil +} diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index 39b59eec0a..d4dd1b3fdd 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -95,22 +95,24 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger return nil, errors.Errorf("unsupported cache type: %s", config.Type) } - cb, err := NewCachingBucket(bucket, logger, reg) - if err != nil { - return nil, err - } + cfg := NewCachingBucketConfig() // Configure cache. 
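The factory change below shows the new flow: build a CachingBucketConfig, register per-operation settings on it, then hand the finished config to NewCachingBucket. A hand-rolled sketch of the same wiring, assuming only the signatures introduced in this commit; the toy cache type is invented, but its Store and Fetch methods match the cache.Cache interface used throughout:

package main

import (
	"context"
	"time"

	"github.com/thanos-io/thanos/pkg/objstore"
	storecache "github.com/thanos-io/thanos/pkg/store/cache"
)

// toyCache implements the two-method cache interface (Store/Fetch) with a
// plain map; TTLs are ignored, which is fine for a demo.
type toyCache struct{ data map[string][]byte }

func (c *toyCache) Store(_ context.Context, data map[string][]byte, _ time.Duration) {
	for k, v := range data {
		c.data[k] = v
	}
}

func (c *toyCache) Fetch(_ context.Context, keys []string) map[string][]byte {
	hits := map[string][]byte{}
	for _, k := range keys {
		if v, ok := c.data[k]; ok {
			hits[k] = v
		}
	}
	return hits
}

func main() {
	c := &toyCache{data: map[string][]byte{}}

	cfg := storecache.NewCachingBucketConfig()
	// Cache Iter results for the root directory only, for five minutes.
	// (A later commit in this series adds an IterCodec argument here.)
	cfg.CacheIter("root", c, func(dir string) bool { return dir == "" }, 5*time.Minute)
	// Cache existence checks for every object, including negative results.
	cfg.CacheExists("all", c, func(string) bool { return true }, 10*time.Minute, 2*time.Minute)

	// Nil logger and registerer are what the tests in this commit use too.
	cb, err := storecache.NewCachingBucket(objstore.NewInMemBucket(), cfg, nil, nil)
	if err != nil {
		panic(err)
	}
	exists, _ := cb.Exists(context.Background(), "some-object")
	_ = exists // false; the negative result is now cached for doesntExistTTL.
}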
- cb.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectSizeTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests) - cb.CacheExists("metafile", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) - cb.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) + cfg.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectSizeTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests) + cfg.CacheExists("metafile", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) + cfg.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. - cb.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.RootIterTTL) + cfg.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.RootIterTTL) // Enabling index caching (example). - cb.CacheObjectSize("index", c, isIndexFile, time.Hour) - cb.CacheGetRange("index", c, isIndexFile, 32000, time.Hour, 24*time.Hour, 3) + cfg.CacheObjectSize("index", c, isIndexFile, time.Hour) + cfg.CacheGetRange("index", c, isIndexFile, 32000, time.Hour, 24*time.Hour, 3) + + cb, err := NewCachingBucket(bucket, cfg, logger, reg) + if err != nil { + return nil, err + } return cb, nil } diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index fa0f07b647..fc802d0063 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -222,9 +222,11 @@ func TestChunksCaching(t *testing.T) { tc.init() } - cachingBucket, err := NewCachingBucket(inmem, nil, nil) + cfg := NewCachingBucketConfig() + cfg.CacheGetRange("chunks", cache, isTSDBChunkFile, subrangeSize, time.Hour, time.Hour, tc.maxGetRangeRequests) + + cachingBucket, err := NewCachingBucket(inmem, cfg, nil, nil) testutil.Ok(t, err) - cachingBucket.CacheGetRange("chunks", cache, isTSDBChunkFile, subrangeSize, time.Hour, time.Hour, tc.maxGetRangeRequests) verifyGetRange(t, cachingBucket, name, tc.offset, tc.length, tc.expectedLength) testutil.Equals(t, tc.expectedCachedBytes, int64(promtest.ToFloat64(cachingBucket.fetchedGetRangeBytes.WithLabelValues(originCache, "chunks")))) @@ -333,9 +335,12 @@ func TestMergeRanges(t *testing.T) { func TestInvalidOffsetAndLength(t *testing.T) { b := &testBucket{objstore.NewInMemBucket()} - c, err := NewCachingBucket(b, nil, nil) + + cfg := NewCachingBucketConfig() + cfg.CacheGetRange("chunks", newMockCache(), func(string) bool { return true }, 10000, time.Hour, time.Hour, 3) + + c, err := NewCachingBucket(b, cfg, nil, nil) testutil.Ok(t, err) - c.CacheGetRange("chunks", newMockCache(), func(string) bool { return true }, 10000, time.Hour, time.Hour, 3) r, err := c.GetRange(context.Background(), "test", -1, 1000) testutil.Equals(t, nil, r) @@ -374,10 +379,12 @@ func TestCachedIter(t *testing.T) { // We reuse cache between tests (!) 
cache := newMockCache() - cb, err := NewCachingBucket(inmem, nil, nil) - testutil.Ok(t, err) const cfgName = "dirs" - cb.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute) + cfg := NewCachingBucketConfig() + cfg.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute) + + cb, err := NewCachingBucket(inmem, cfg, nil, nil) + testutil.Ok(t, err) verifyIter(t, cb, allFiles, false, cfgName) @@ -435,11 +442,12 @@ func TestExists(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - cb, err := NewCachingBucket(inmem, nil, nil) - testutil.Ok(t, err) - + cfg := NewCachingBucketConfig() const cfgName = "test" - cb.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) + cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) + + cb, err := NewCachingBucket(inmem, cfg, nil, nil) + testutil.Ok(t, err) verifyExists(t, cb, testFilename, false, false, cfgName) @@ -460,11 +468,12 @@ func TestExistsCachingDisabled(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - cb, err := NewCachingBucket(inmem, nil, nil) - testutil.Ok(t, err) - + cfg := NewCachingBucketConfig() const cfgName = "test" - cb.CacheExists(cfgName, cache, func(string) bool { return false }, 10*time.Minute, 2*time.Minute) + cfg.CacheExists(cfgName, cache, func(string) bool { return false }, 10*time.Minute, 2*time.Minute) + + cb, err := NewCachingBucket(inmem, cfg, nil, nil) + testutil.Ok(t, err) verifyExists(t, cb, testFilename, false, false, cfgName) @@ -496,12 +505,13 @@ func TestGet(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - cb, err := NewCachingBucket(inmem, nil, nil) - testutil.Ok(t, err) - + cfg := NewCachingBucketConfig() const cfgName = "metafile" - cb.CacheGet(cfgName, cache, matchAll, 10*time.Minute, 10*time.Minute, 2*time.Minute) - cb.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) + cfg.CacheGet(cfgName, cache, matchAll, 10*time.Minute, 10*time.Minute, 2*time.Minute) + cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) + + cb, err := NewCachingBucket(inmem, cfg, nil, nil) + testutil.Ok(t, err) verifyGet(t, cb, testFilename, nil, false, cfgName) verifyExists(t, cb, testFilename, false, true, cfgName) @@ -555,11 +565,12 @@ func TestObjectSize(t *testing.T) { // We reuse cache between tests (!) cache := newMockCache() - cb, err := NewCachingBucket(inmem, nil, nil) - testutil.Ok(t, err) - + cfg := NewCachingBucketConfig() const cfgName = "test" - cb.CacheObjectSize(cfgName, cache, matchAll, time.Minute) + cfg.CacheObjectSize(cfgName, cache, matchAll, time.Minute) + + cb, err := NewCachingBucket(inmem, cfg, nil, nil) + testutil.Ok(t, err) verifyObjectSize(t, cb, testFilename, -1, false, cfgName) verifyObjectSize(t, cb, testFilename, -1, false, cfgName) // ObjectSize doesn't cache non-existent files. From c4bd77cc49b9dbeddf32cc7ab485b10f90a7cc63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 17:22:35 +0200 Subject: [PATCH 15/25] Review feedback. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 60 +++++++++-------------- pkg/store/cache/caching_bucket_factory.go | 12 +++-- 2 files changed, 31 insertions(+), 41 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 6bbc4da08e..42b64808fa 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -11,6 +11,7 @@ import ( "fmt" "io" "io/ioutil" + "strconv" "sync" "time" @@ -32,9 +33,6 @@ const ( originCache = "cache" originBucket = "bucket" - existsTrue = "true" - existsFalse = "false" - opGet = "get" opGetRange = "getrange" opIter = "iter" @@ -60,7 +58,8 @@ type CachingBucket struct { operationHits *prometheus.CounterVec } -// NewCachingBucket creates new caching bucket with provided configuration. +// NewCachingBucket creates new caching bucket with provided configuration. Configuration should not be +// changed after creating caching bucket. func NewCachingBucket(b objstore.Bucket, cfg *CachingBucketConfig, logger log.Logger, reg prometheus.Registerer) (*CachingBucket, error) { if b == nil { return nil, errors.New("bucket is nil") @@ -142,24 +141,19 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er cb.operationRequests.WithLabelValues(opIter, cfgName).Inc() key := cachingKeyIter(dir) - data := cfg.cache.Fetch(ctx, []string{key}) if data[key] != nil { list, err := decodeIterResult(data[key]) if err == nil { cb.operationHits.WithLabelValues(opIter, cfgName).Inc() - for _, n := range list { - err = f(n) - if err != nil { + if err := f(n); err != nil { return err } } return nil - } else { - // This should not happen. - level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "err", err) } + level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "err", err) } // Iteration can take a while (esp. since it calls function), and iterTTL is generally low. 
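The comment above marks the one subtlety in Iter's caching: the TTL is anchored at the moment listing starts, so a slow, callback-driven iteration cannot stretch the entry's effective lifetime. A small self-contained sketch of that store-with-remaining-TTL step; the helper name is invented:

package storecache

import (
	"context"
	"time"

	"github.com/thanos-io/thanos/pkg/cache"
)

// storeWithRemainingTTL stores val under key, but only for whatever part of
// ttl is left since start; if the operation already took longer than ttl,
// the result is considered stale and is not cached at all.
func storeWithRemainingTTL(ctx context.Context, c cache.Cache, key string, val []byte, ttl time.Duration, start time.Time) {
	remaining := ttl - time.Since(start)
	if remaining > 0 {
		c.Store(ctx, map[string][]byte{key: val}, remaining)
	}
}

With a 5-minute TTL and a listing that took 2 minutes, the entry is stored for the remaining 3 minutes; if listing took longer than the TTL, nothing is stored.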
@@ -173,8 +167,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er remainingTTL := cfg.ttl - time.Since(iterTime) if err == nil && remainingTTL > 0 { - data := encodeIterResult(list) - if data != nil { + if data := encodeIterResult(list); data != nil { cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) } } @@ -214,16 +207,12 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) hits := cfg.cache.Fetch(ctx, []string{key}) if ex := hits[key]; ex != nil { - switch string(ex) { - case existsTrue: - cb.operationHits.WithLabelValues(opExists, cfgName).Inc() - return true, nil - case existsFalse: + exists, err := strconv.ParseBool(string(ex)) + if err == nil { cb.operationHits.WithLabelValues(opExists, cfgName).Inc() - return false, nil - default: - level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) + return exists, nil } + level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) } existsTime := time.Now() @@ -236,20 +225,15 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) } func storeExistsCacheEntry(ctx context.Context, cachingKey string, exists bool, ts time.Time, cache cache.Cache, existsTTL, doesntExistTTL time.Duration) { - var ( - data []byte - ttl time.Duration - ) + var ttl time.Duration if exists { ttl = existsTTL - time.Since(ts) - data = []byte(existsTrue) } else { ttl = doesntExistTTL - time.Since(ts) - data = []byte(existsFalse) } if ttl > 0 { - cache.Store(ctx, map[string][]byte{cachingKey: data}, ttl) + cache.Store(ctx, map[string][]byte{cachingKey: []byte(strconv.FormatBool(exists))}, ttl) } } @@ -261,19 +245,21 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e cb.operationRequests.WithLabelValues(opGet, cfgName).Inc() - key := cachingKeyContent(name) + contentKey := cachingKeyContent(name) existsKey := cachingKeyExists(name) - hits := cfg.cache.Fetch(ctx, []string{key, existsKey}) - if hits[key] != nil { + hits := cfg.cache.Fetch(ctx, []string{contentKey, existsKey}) + if hits[contentKey] != nil { cb.operationHits.WithLabelValues(opGet, cfgName).Inc() - return ioutil.NopCloser(bytes.NewReader(hits[key])), nil + return ioutil.NopCloser(bytes.NewReader(hits[contentKey])), nil } // If we know that file doesn't exist, we can return that. Useful for deletion marks. 
- if ex := hits[existsKey]; ex != nil && string(ex) == existsFalse { - cb.operationHits.WithLabelValues(opGet, cfgName).Inc() - return nil, errObjNotFound + if ex := hits[existsKey]; ex != nil { + if exists, err := strconv.ParseBool(string(ex)); err == nil && !exists { + cb.operationHits.WithLabelValues(opGet, cfgName).Inc() + return nil, errObjNotFound + } } getTime := time.Now() @@ -295,7 +281,7 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e ttl := cfg.contentTTL - time.Since(getTime) if ttl > 0 { - cfg.cache.Store(ctx, map[string][]byte{key: data}, ttl) + cfg.cache.Store(ctx, map[string][]byte{contentKey: data}, ttl) } storeExistsCacheEntry(ctx, existsKey, true, getTime, cfg.cache, cfg.existsTTL, cfg.doesntExistTTL) @@ -410,8 +396,8 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset hits := cfg.cache.Fetch(ctx, keys) for _, b := range hits { totalCachedBytes += int64(len(b)) - cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(len(b))) } + cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(totalCachedBytes)) cb.operationHits.WithLabelValues(opGetRange, cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) if len(hits) < len(keys) { diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index d4dd1b3fdd..f6db8d3569 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -46,7 +46,7 @@ type CachingWithBackendConfig struct { ChunkSubrangeTTL time.Duration `yaml:"chunk_subrange_ttl"` // How long to cache result of Iter call in root directory. - RootIterTTL time.Duration `yaml:"root_iter_ttl"` + BlocksIterTTL time.Duration `yaml:"blocks_iter_ttl"` // Config for Exists and Get operations for metadata files. MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"` @@ -59,10 +59,10 @@ func (cfg *CachingWithBackendConfig) Defaults() { cfg.ChunkObjectSizeTTL = 24 * time.Hour cfg.ChunkSubrangeTTL = 24 * time.Hour cfg.MaxChunksGetRangeRequests = 3 - cfg.RootIterTTL = 5 * time.Minute + cfg.BlocksIterTTL = 5 * time.Minute cfg.MetafileExistsTTL = 10 * time.Minute cfg.MetafileDoesntExistTTL = 3 * time.Minute - cfg.MetafileContentTTL = 1 * time.Hour + cfg.MetafileContentTTL = 24 * time.Hour } // NewCachingBucketFromYaml uses YAML configuration to create new caching bucket. @@ -103,7 +103,7 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger cfg.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. - cfg.CacheIter("dir", c, func(dir string) bool { return dir == "" }, config.RootIterTTL) + cfg.CacheIter("dir", c, isBlocksRootDir, config.BlocksIterTTL) // Enabling index caching (example). cfg.CacheObjectSize("index", c, isIndexFile, time.Hour) @@ -128,3 +128,7 @@ func isMetaFile(name string) bool { func isIndexFile(name string) bool { return strings.HasSuffix(name, "/index") } + +func isBlocksRootDir(name string) bool { + return name == "" +} From dfd54eda438588d589fd6954aab0c082a629a335 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 17:37:51 +0200 Subject: [PATCH 16/25] Fix operationRequests and operationHits for getRange. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 42b64808fa..7ad1919aef 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -345,7 +345,7 @@ func (cb *CachingBucket) cachedObjectSize(ctx context.Context, name string, cfgN } func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset, length int64, cfgName string, cfg *getRangeConfig) (io.ReadCloser, error) { - cb.operationRequests.WithLabelValues(opGetRange, cfgName) + cb.operationRequests.WithLabelValues(opGetRange, cfgName).Inc() cb.requestedGetRangeBytes.WithLabelValues(cfgName).Add(float64(length)) size, err := cb.cachedObjectSize(ctx, name, cfgName, cfg.cache, cfg.objectSizeTTL) @@ -398,7 +398,7 @@ func (cb *CachingBucket) cachedGetRange(ctx context.Context, name string, offset totalCachedBytes += int64(len(b)) } cb.fetchedGetRangeBytes.WithLabelValues(originCache, cfgName).Add(float64(totalCachedBytes)) - cb.operationHits.WithLabelValues(opGetRange, cfgName).Add(float64(totalCachedBytes) / float64(totalRequestedBytes)) + cb.operationHits.WithLabelValues(opGetRange, cfgName).Add(float64(len(hits)) / float64(len(keys))) if len(hits) < len(keys) { if hits == nil { From f2090c7cd175bb0b9666c2c35cc46a8e927598f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 17:53:03 +0200 Subject: [PATCH 17/25] Make codec for Iter results configurable. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket.go | 41 ++++++++++------------- pkg/store/cache/caching_bucket_config.go | 12 +++++-- pkg/store/cache/caching_bucket_factory.go | 2 +- pkg/store/cache/caching_bucket_test.go | 2 +- 4 files changed, 29 insertions(+), 28 deletions(-) diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go index 7ad1919aef..444e52260f 100644 --- a/pkg/store/cache/caching_bucket.go +++ b/pkg/store/cache/caching_bucket.go @@ -17,7 +17,6 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/golang/snappy" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -143,7 +142,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er key := cachingKeyIter(dir) data := cfg.cache.Fetch(ctx, []string{key}) if data[key] != nil { - list, err := decodeIterResult(data[key]) + list, err := cfg.codec.Decode(data[key]) if err == nil { cb.operationHits.WithLabelValues(opIter, cfgName).Inc() for _, n := range list { @@ -167,34 +166,15 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er remainingTTL := cfg.ttl - time.Since(iterTime) if err == nil && remainingTTL > 0 { - if data := encodeIterResult(list); data != nil { + if data, err := cfg.codec.Encode(list); err == nil { cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) + } else { + level.Warn(cb.logger).Log("msg", "failed to encode Iter result", "err", err) } } return err } -// Iter results should compress nicely, especially in subdirectories. 
-func encodeIterResult(files []string) []byte { - data, err := json.Marshal(files) - if err != nil { - return nil - } - - return snappy.Encode(nil, data) -} - -func decodeIterResult(data []byte) ([]string, error) { - decoded, err := snappy.Decode(nil, data) - if err != nil { - return nil, err - } - - var list []string - err = json.Unmarshal(decoded, &list) - return list, err -} - func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) { cfgName, cfg := cb.cfg.findExistConfig(name) if cfg == nil { @@ -595,3 +575,16 @@ func (c *subrangesReader) subrangeAt(offset int64) ([]byte, error) { } return b, nil } + +// JsonIterCodec encodes iter results into JSON. Suitable for root dir. +type JsonIterCodec struct{} + +func (jic JsonIterCodec) Encode(files []string) ([]byte, error) { + return json.Marshal(files) +} + +func (jic JsonIterCodec) Decode(data []byte) ([]string, error) { + var list []string + err := json.Unmarshal(data, &list) + return list, err +} diff --git a/pkg/store/cache/caching_bucket_config.go b/pkg/store/cache/caching_bucket_config.go index 9696ff6749..96dc7a1610 100644 --- a/pkg/store/cache/caching_bucket_config.go +++ b/pkg/store/cache/caching_bucket_config.go @@ -6,6 +6,12 @@ import ( "github.com/thanos-io/thanos/pkg/cache" ) +// Codec for encoding and decoding results of Iter call. +type IterCodec interface { + Encode(files []string) ([]byte, error) + Decode(cachedData []byte) ([]string, error) +} + // CachingBucketConfig contains low-level configuration for individual bucket operations. // This is not exposed to the user, but it is expected that code sets up individual // operations based on user-provided configuration. @@ -36,7 +42,8 @@ type operationConfig struct { // Operation-specific configs. type iterConfig struct { operationConfig - ttl time.Duration + ttl time.Duration + codec IterCodec } type existsConfig struct { @@ -78,10 +85,11 @@ func newOperationConfig(cache cache.Cache, matcher func(string) bool) operationC } // CacheIter configures caching of "Iter" operation for matching directories. -func (cfg *CachingBucketConfig) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration) { +func (cfg *CachingBucketConfig) CacheIter(configName string, cache cache.Cache, matcher func(string) bool, ttl time.Duration, codec IterCodec) { cfg.iter[configName] = &iterConfig{ operationConfig: newOperationConfig(cache, matcher), ttl: ttl, + codec: codec, } } diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index f6db8d3569..f9296d92bf 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -103,7 +103,7 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger cfg.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. - cfg.CacheIter("dir", c, isBlocksRootDir, config.BlocksIterTTL) + cfg.CacheIter("dir", c, isBlocksRootDir, config.BlocksIterTTL, JsonIterCodec{}) // Enabling index caching (example). 
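With the codec now injectable, the snappy-compressed JSON encoding deleted above can be reinstated by any caller as a custom IterCodec. A sketch of such an implementation; the type name is invented, but the method set matches the IterCodec interface added in this commit and the body mirrors the removed helpers:

package storecache

import (
	"encoding/json"

	"github.com/golang/snappy"
)

// snappyJSONIterCodec marshals the listing to JSON and snappy-compresses it;
// as the removed comment noted, Iter results compress well.
type snappyJSONIterCodec struct{}

func (snappyJSONIterCodec) Encode(files []string) ([]byte, error) {
	data, err := json.Marshal(files)
	if err != nil {
		return nil, err
	}
	return snappy.Encode(nil, data), nil
}

func (snappyJSONIterCodec) Decode(cachedData []byte) ([]string, error) {
	decoded, err := snappy.Decode(nil, cachedData)
	if err != nil {
		return nil, err
	}
	var list []string
	err = json.Unmarshal(decoded, &list)
	return list, err
}

Plugging it in is then a one-argument change, e.g. cfg.CacheIter("dir", c, isBlocksRootDir, config.BlocksIterTTL, snappyJSONIterCodec{}).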
cfg.CacheObjectSize("index", c, isIndexFile, time.Hour) diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index fc802d0063..bc37112aa8 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -381,7 +381,7 @@ func TestCachedIter(t *testing.T) { const cfgName = "dirs" cfg := NewCachingBucketConfig() - cfg.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute) + cfg.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute, JsonIterCodec{}) cb, err := NewCachingBucket(inmem, cfg, nil, nil) testutil.Ok(t, err) From 3c390db10541a9394e4dc6f01bbe68801456dfb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Thu, 14 May 2020 17:57:30 +0200 Subject: [PATCH 18/25] Added header. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket_config.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/store/cache/caching_bucket_config.go b/pkg/store/cache/caching_bucket_config.go index 96dc7a1610..62a5cedf35 100644 --- a/pkg/store/cache/caching_bucket_config.go +++ b/pkg/store/cache/caching_bucket_config.go @@ -1,3 +1,6 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + package storecache import ( From 5bb414c32e4ae8f11e9d374a4d58643261b89d3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Fri, 15 May 2020 12:10:27 +0200 Subject: [PATCH 19/25] Renamed "dir" config to "blocks-iter". MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- pkg/store/cache/caching_bucket_factory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index f9296d92bf..390365009a 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -103,7 +103,7 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger cfg.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. - cfg.CacheIter("dir", c, isBlocksRootDir, config.BlocksIterTTL, JsonIterCodec{}) + cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JsonIterCodec{}) // Enabling index caching (example). cfg.CacheObjectSize("index", c, isIndexFile, time.Hour) From 1d9e85e6b625cc2c4d30146530a8d0b28cbdba72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?= Date: Fri, 15 May 2020 12:15:44 +0200 Subject: [PATCH 20/25] Bump default values for meta exists/doesntExist ttls. 
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket_factory.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go
index 390365009a..a53bbf86e5 100644
--- a/pkg/store/cache/caching_bucket_factory.go
+++ b/pkg/store/cache/caching_bucket_factory.go
@@ -60,8 +60,8 @@ func (cfg *CachingWithBackendConfig) Defaults() {
 	cfg.ChunkSubrangeTTL = 24 * time.Hour
 	cfg.MaxChunksGetRangeRequests = 3
 	cfg.BlocksIterTTL = 5 * time.Minute
-	cfg.MetafileExistsTTL = 10 * time.Minute
-	cfg.MetafileDoesntExistTTL = 3 * time.Minute
+	cfg.MetafileExistsTTL = 2 * time.Hour
+	cfg.MetafileDoesntExistTTL = 15 * time.Minute
 	cfg.MetafileContentTTL = 24 * time.Hour
 }
 
From 0d09947ae72d7b416b2de06896ae51907f67b324 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 15 May 2020 12:19:01 +0200
Subject: [PATCH 21/25] Removed the example of how the cache could be configured for the index.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket_factory.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go
index a53bbf86e5..bbbd10f230 100644
--- a/pkg/store/cache/caching_bucket_factory.go
+++ b/pkg/store/cache/caching_bucket_factory.go
@@ -105,10 +105,6 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger
 	// Cache Iter requests for root.
 	cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JsonIterCodec{})
 
-	// Enabling index caching (example).
-	cfg.CacheObjectSize("index", c, isIndexFile, time.Hour)
-	cfg.CacheGetRange("index", c, isIndexFile, 32000, time.Hour, 24*time.Hour, 3)
-
 	cb, err := NewCachingBucket(bucket, cfg, logger, reg)
 	if err != nil {
 		return nil, err

From 42e35270cb93483b6010c785cdc56b7a4da7d587 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 15 May 2020 15:08:09 +0200
Subject: [PATCH 22/25] Address review feedback.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go         | 22 ++++++++++++----------
 pkg/store/cache/caching_bucket_factory.go | 15 +++++----------
 pkg/store/cache/caching_bucket_test.go    |  2 +-
 3 files changed, 18 insertions(+), 21 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 444e52260f..510bb75043 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -41,7 +41,7 @@ const (
 
 var errObjNotFound = errors.Errorf("object not found")
 
-// Bucket implementation that provides some caching features, based on passed configuration.
+// CachingBucket implementation that provides some caching features, based on passed configuration.
 type CachingBucket struct {
 	objstore.Bucket
 
@@ -152,7 +152,7 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er
 			}
 			return nil
 		}
-		level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "err", err)
+		level.Warn(cb.logger).Log("msg", "failed to decode cached Iter result", "key", key, "err", err)
 	}
 
 	// Iteration can take a while (esp. since it calls function), and iterTTL is generally low.
@@ -166,11 +166,12 @@ func (cb *CachingBucket) Iter(ctx context.Context, dir string, f func(string) er remainingTTL := cfg.ttl - time.Since(iterTime) if err == nil && remainingTTL > 0 { - if data, err := cfg.codec.Encode(list); err == nil { + data, encErr := cfg.codec.Encode(list) + if encErr == nil { cfg.cache.Store(ctx, map[string][]byte{key: data}, remainingTTL) - } else { - level.Warn(cb.logger).Log("msg", "failed to encode Iter result", "err", err) + return nil } + level.Warn(cb.logger).Log("msg", "failed to encode Iter result", "key", key, "err", encErr) } return err } @@ -192,7 +193,7 @@ func (cb *CachingBucket) Exists(ctx context.Context, name string) (bool, error) cb.operationHits.WithLabelValues(opExists, cfgName).Inc() return exists, nil } - level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "val", string(ex)) + level.Warn(cb.logger).Log("msg", "unexpected cached 'exists' value", "key", key, "val", string(ex)) } existsTime := time.Now() @@ -254,6 +255,7 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e } defer runutil.CloseWithLogOnErr(cb.logger, reader, "CachingBucket.Get(%q)", name) + // TODO: Returned reader should use streaming, and have limit for how big items can be cached. data, err := ioutil.ReadAll(reader) if err != nil { return nil, err @@ -576,14 +578,14 @@ func (c *subrangesReader) subrangeAt(offset int64) ([]byte, error) { return b, nil } -// JsonIterCodec encodes iter results into JSON. Suitable for root dir. -type JsonIterCodec struct{} +// JSONIterCodec encodes iter results into JSON. Suitable for root dir. +type JSONIterCodec struct{} -func (jic JsonIterCodec) Encode(files []string) ([]byte, error) { +func (jic JSONIterCodec) Encode(files []string) ([]byte, error) { return json.Marshal(files) } -func (jic JsonIterCodec) Decode(data []byte) ([]string, error) { +func (jic JSONIterCodec) Decode(data []byte) ([]string, error) { var list []string err := json.Unmarshal(data, &list) return list, err diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index bbbd10f230..746621a482 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -23,12 +23,7 @@ import ( // BucketCacheProvider is a type used to evaluate all bucket cache providers. type BucketCacheProvider string -const ( - MemcachedBucketCacheProvider BucketCacheProvider = "memcached" // Memcached cache-provider for caching bucket. - - metaFilenameSuffix = "/" + metadata.MetaFilename - deletionMarkFilenameSuffix = "/" + metadata.DeletionMarkFilename -) +const MemcachedBucketCacheProvider BucketCacheProvider = "memcached" // Memcached cache-provider for caching bucket. // CachingWithBackendConfig is a configuration of caching bucket used by Store component. type CachingWithBackendConfig struct { @@ -99,11 +94,11 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger // Configure cache. 
 	cfg.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectSizeTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests)
-	cfg.CacheExists("metafile", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
-	cfg.CacheGet("metafile", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
+	cfg.CacheExists("meta.jsons", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
+	cfg.CacheGet("meta.jsons", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL)
 
 	// Cache Iter requests for root.
-	cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JsonIterCodec{})
+	cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JSONIterCodec{})
 
 	cb, err := NewCachingBucket(bucket, cfg, logger, reg)
 	if err != nil {
@@ -118,7 +113,7 @@ var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)
 func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) }
 
 func isMetaFile(name string) bool {
-	return strings.HasSuffix(name, metaFilenameSuffix) || strings.HasSuffix(name, deletionMarkFilenameSuffix)
+	return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename)
 }
 
 func isIndexFile(name string) bool {
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index bc37112aa8..fc74222ec3 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -381,7 +381,7 @@ func TestCachedIter(t *testing.T) {
 
 	const cfgName = "dirs"
 	cfg := NewCachingBucketConfig()
-	cfg.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute, JsonIterCodec{})
+	cfg.CacheIter(cfgName, cache, func(string) bool { return true }, 5*time.Minute, JSONIterCodec{})
 
 	cb, err := NewCachingBucket(inmem, cfg, nil, nil)
 	testutil.Ok(t, err)

From 9c8523e93b34589471e1bd5c703404c774cdf838 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 15 May 2020 15:36:30 +0200
Subject: [PATCH 23/25] Get now returns a streaming reader and buffers the object in memory.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket.go         | 63 ++++++++++++++++++-----
 pkg/store/cache/caching_bucket_config.go  |  8 +--
 pkg/store/cache/caching_bucket_factory.go |  5 +-
 pkg/store/cache/caching_bucket_test.go    | 28 +++++++++-
 4 files changed, 85 insertions(+), 19 deletions(-)

diff --git a/pkg/store/cache/caching_bucket.go b/pkg/store/cache/caching_bucket.go
index 510bb75043..c725719a73 100644
--- a/pkg/store/cache/caching_bucket.go
+++ b/pkg/store/cache/caching_bucket.go
@@ -253,21 +253,18 @@ func (cb *CachingBucket) Get(ctx context.Context, name string) (io.ReadCloser, e
 		return nil, err
 	}
 
-	defer runutil.CloseWithLogOnErr(cb.logger, reader, "CachingBucket.Get(%q)", name)
-	// TODO: Returned reader should use streaming, and have limit for how big items can be cached.
- data, err := ioutil.ReadAll(reader) - if err != nil { - return nil, err - } - - ttl := cfg.contentTTL - time.Since(getTime) - if ttl > 0 { - cfg.cache.Store(ctx, map[string][]byte{contentKey: data}, ttl) - } storeExistsCacheEntry(ctx, existsKey, true, getTime, cfg.cache, cfg.existsTTL, cfg.doesntExistTTL) - - return ioutil.NopCloser(bytes.NewReader(data)), nil + return &getReader{ + c: cfg.cache, + ctx: ctx, + r: reader, + buf: new(bytes.Buffer), + startTime: getTime, + ttl: cfg.contentTTL, + cacheKey: contentKey, + maxSize: cfg.maxCacheableSize, + }, nil } func (cb *CachingBucket) IsObjNotFoundErr(err error) bool { @@ -578,6 +575,46 @@ func (c *subrangesReader) subrangeAt(offset int64) ([]byte, error) { return b, nil } +type getReader struct { + c cache.Cache + ctx context.Context + r io.ReadCloser + buf *bytes.Buffer + startTime time.Time + ttl time.Duration + cacheKey string + maxSize int +} + +func (g *getReader) Close() error { + // We don't know if entire object was read, don't store it here. + g.buf = nil + return g.r.Close() +} + +func (g *getReader) Read(p []byte) (n int, err error) { + n, err = g.r.Read(p) + if n > 0 && g.buf != nil { + if g.buf.Len()+n <= g.maxSize { + g.buf.Write(p[:n]) + } else { + // Object is larger than max size, stop caching. + g.buf = nil + } + } + + if err == io.EOF && g.buf != nil { + remainingTTL := g.ttl - time.Since(g.startTime) + if remainingTTL > 0 { + g.c.Store(g.ctx, map[string][]byte{g.cacheKey: g.buf.Bytes()}, remainingTTL) + } + // Clear reference, to avoid doing another Store on next read. + g.buf = nil + } + + return n, err +} + // JSONIterCodec encodes iter results into JSON. Suitable for root dir. type JSONIterCodec struct{} diff --git a/pkg/store/cache/caching_bucket_config.go b/pkg/store/cache/caching_bucket_config.go index 62a5cedf35..dce0350fdf 100644 --- a/pkg/store/cache/caching_bucket_config.go +++ b/pkg/store/cache/caching_bucket_config.go @@ -57,7 +57,8 @@ type existsConfig struct { type getConfig struct { existsConfig - contentTTL time.Duration + contentTTL time.Duration + maxCacheableSize int } type getRangeConfig struct { @@ -97,14 +98,15 @@ func (cfg *CachingBucketConfig) CacheIter(configName string, cache cache.Cache, } // CacheGet configures caching of "Get" operation for matching files. Content of the object is cached, as well as whether object exists or not. 
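A consequence of getReader worth spelling out for callers: content reaches the cache only when the wrapped reader is drained to io.EOF, and Close alone discards the buffer, so a partially read object is never stored (a test added later in the series pins this behaviour down). A hedged usage sketch; the helper name is invented:

package main

import (
	"context"
	"io/ioutil"

	storecache "github.com/thanos-io/thanos/pkg/store/cache"
)

// readAndCache drains the reader returned by Get; reaching io.EOF is what
// triggers getReader to store the buffered content (if it fit in maxSize).
func readAndCache(ctx context.Context, cb *storecache.CachingBucket, name string) ([]byte, error) {
	r, err := cb.Get(ctx, name)
	if err != nil {
		return nil, err
	}
	// Closing without reading to EOF would drop the buffer without storing.
	defer r.Close()
	return ioutil.ReadAll(r)
}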
-func (cfg *CachingBucketConfig) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, contentTTL, existsTTL, doesntExistTTL time.Duration) { +func (cfg *CachingBucketConfig) CacheGet(configName string, cache cache.Cache, matcher func(string) bool, maxCacheableSize int, contentTTL, existsTTL, doesntExistTTL time.Duration) { cfg.get[configName] = &getConfig{ existsConfig: existsConfig{ operationConfig: newOperationConfig(cache, matcher), existsTTL: existsTTL, doesntExistTTL: doesntExistTTL, }, - contentTTL: contentTTL, + contentTTL: contentTTL, + maxCacheableSize: maxCacheableSize, } } diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go index 746621a482..4a461bb382 100644 --- a/pkg/store/cache/caching_bucket_factory.go +++ b/pkg/store/cache/caching_bucket_factory.go @@ -17,6 +17,7 @@ import ( "github.com/thanos-io/thanos/pkg/block/metadata" cache "github.com/thanos-io/thanos/pkg/cache" "github.com/thanos-io/thanos/pkg/cacheutil" + "github.com/thanos-io/thanos/pkg/model" "github.com/thanos-io/thanos/pkg/objstore" ) @@ -47,6 +48,7 @@ type CachingWithBackendConfig struct { MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"` MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"` MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` + MetafileMaxSize model.Bytes `yaml:"metafile_max_size"` } func (cfg *CachingWithBackendConfig) Defaults() { @@ -58,6 +60,7 @@ func (cfg *CachingWithBackendConfig) Defaults() { cfg.MetafileExistsTTL = 2 * time.Hour cfg.MetafileDoesntExistTTL = 15 * time.Minute cfg.MetafileContentTTL = 24 * time.Hour + cfg.MetafileMaxSize = 1024 * 1024 // Equal to default MaxItemSize in memcached client. } // NewCachingBucketFromYaml uses YAML configuration to create new caching bucket. @@ -95,7 +98,7 @@ func NewCachingBucketFromYaml(yamlContent []byte, bucket objstore.Bucket, logger // Configure cache. cfg.CacheGetRange("chunks", c, isTSDBChunkFile, config.ChunkSubrangeSize, config.ChunkObjectSizeTTL, config.ChunkSubrangeTTL, config.MaxChunksGetRangeRequests) cfg.CacheExists("meta.jsons", c, isMetaFile, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) - cfg.CacheGet("meta.jsons", c, isMetaFile, config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) + cfg.CacheGet("meta.jsons", c, isMetaFile, int(config.MetafileMaxSize), config.MetafileContentTTL, config.MetafileExistsTTL, config.MetafileDoesntExistTTL) // Cache Iter requests for root. cfg.CacheIter("blocks-iter", c, isBlocksRootDir, config.BlocksIterTTL, JSONIterCodec{}) diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go index fc74222ec3..5f7731ea35 100644 --- a/pkg/store/cache/caching_bucket_test.go +++ b/pkg/store/cache/caching_bucket_test.go @@ -507,7 +507,7 @@ func TestGet(t *testing.T) { cfg := NewCachingBucketConfig() const cfgName = "metafile" - cfg.CacheGet(cfgName, cache, matchAll, 10*time.Minute, 10*time.Minute, 2*time.Minute) + cfg.CacheGet(cfgName, cache, matchAll, 1024, 10*time.Minute, 10*time.Minute, 2*time.Minute) cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute) cb, err := NewCachingBucket(inmem, cfg, nil, nil) @@ -530,6 +530,30 @@ func TestGet(t *testing.T) { verifyExists(t, cb, testFilename, true, true, cfgName) } +func TestGetTooBigObject(t *testing.T) { + inmem := objstore.NewInMemBucket() + + // We reuse cache between tests (!) 
diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index fc74222ec3..5f7731ea35 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -507,7 +507,7 @@ func TestGet(t *testing.T) {
 	cfg := NewCachingBucketConfig()
 
 	const cfgName = "metafile"
-	cfg.CacheGet(cfgName, cache, matchAll, 10*time.Minute, 10*time.Minute, 2*time.Minute)
+	cfg.CacheGet(cfgName, cache, matchAll, 1024, 10*time.Minute, 10*time.Minute, 2*time.Minute)
 	cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute)
 
 	cb, err := NewCachingBucket(inmem, cfg, nil, nil)
@@ -530,6 +530,30 @@
 	verifyExists(t, cb, testFilename, true, true, cfgName)
 }
 
+func TestGetTooBigObject(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+
+	// Fresh cache for this test.
+	cache := newMockCache()
+
+	cfg := NewCachingBucketConfig()
+	const cfgName = "metafile"
+	// Only allow 5 bytes to be cached.
+	cfg.CacheGet(cfgName, cache, matchAll, 5, 10*time.Minute, 10*time.Minute, 2*time.Minute)
+	cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute)
+
+	cb, err := NewCachingBucket(inmem, cfg, nil, nil)
+	testutil.Ok(t, err)
+
+	data := []byte("hello world")
+	testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data)))
+
+	// Object is too big, so it is never stored in the cache, no matter how often it is read.
+	verifyGet(t, cb, testFilename, data, false, cfgName)
+	verifyGet(t, cb, testFilename, data, false, cfgName)
+	verifyExists(t, cb, testFilename, true, true, cfgName)
+}
+
 func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, cfgName string) {
 	hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opGet, cfgName)))
 
@@ -545,7 +569,7 @@ func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte
 		}
 	} else {
 		testutil.Ok(t, err)
-		runutil.CloseWithLogOnErr(nil, r, "verifyGet")
+		defer runutil.CloseWithLogOnErr(nil, r, "verifyGet")
 		data, err := ioutil.ReadAll(r)
 		testutil.Ok(t, err)
 		testutil.Equals(t, expectedData, data)

From 20145eb9a6938ab4fcfd2c1ee52f40e1bf8c413c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 15 May 2020 15:39:49 +0200
Subject: [PATCH 24/25] Added test for partial read.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket_test.go | 29 ++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/pkg/store/cache/caching_bucket_test.go b/pkg/store/cache/caching_bucket_test.go
index 5f7731ea35..d392d9066e 100644
--- a/pkg/store/cache/caching_bucket_test.go
+++ b/pkg/store/cache/caching_bucket_test.go
@@ -554,6 +554,35 @@ func TestGetTooBigObject(t *testing.T) {
 	verifyExists(t, cb, testFilename, true, true, cfgName)
 }
 
+func TestGetPartialRead(t *testing.T) {
+	inmem := objstore.NewInMemBucket()
+
+	cache := newMockCache()
+
+	cfg := NewCachingBucketConfig()
+	const cfgName = "metafile"
+	cfg.CacheGet(cfgName, cache, matchAll, 1024, 10*time.Minute, 10*time.Minute, 2*time.Minute)
+	cfg.CacheExists(cfgName, cache, matchAll, 10*time.Minute, 2*time.Minute)
+
+	cb, err := NewCachingBucket(inmem, cfg, nil, nil)
+	testutil.Ok(t, err)
+
+	data := []byte("hello world")
+	testutil.Ok(t, inmem.Upload(context.Background(), testFilename, bytes.NewBuffer(data)))
+
+	// Read only a few bytes of the data.
+	r, err := cb.Get(context.Background(), testFilename)
+	testutil.Ok(t, err)
+	_, err = r.Read(make([]byte, 1))
+	testutil.Ok(t, err)
+	testutil.Ok(t, r.Close())
+
+	// Object wasn't cached, as it wasn't fully read.
+	verifyGet(t, cb, testFilename, data, false, cfgName)
+	// verifyGet has read the whole object, so now it's cached.
+	verifyGet(t, cb, testFilename, data, true, cfgName)
+}
+
 func verifyGet(t *testing.T, cb *CachingBucket, file string, expectedData []byte, cacheUsed bool, cfgName string) {
 	hitsBefore := int(promtest.ToFloat64(cb.operationHits.WithLabelValues(opGet, cfgName)))
 
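This test pins down getReader's contract: content is stored only when a read reaches io.EOF within the size cap, so closing early caches nothing. A hedged in-package sketch of that contract against a configured *CachingBucket — the object name, helper name, and error handling are illustrative; imports are as in the files above:

// demoGetCaching shows when a Get result ends up in the cache.
func demoGetCaching(ctx context.Context, cb *CachingBucket, name string) error {
	// Partial read: io.EOF is never observed, so Close() just drops the
	// buffer and nothing is stored.
	r, err := cb.Get(ctx, name)
	if err != nil {
		return err
	}
	if _, err := r.Read(make([]byte, 1)); err != nil {
		return err
	}
	if err := r.Close(); err != nil {
		return err
	}

	// Full read: ioutil.ReadAll drives the reader to io.EOF, which triggers
	// Store (subject to maxCacheableSize and the remaining TTL).
	r, err = cb.Get(ctx, name)
	if err != nil {
		return err
	}
	if _, err := ioutil.ReadAll(r); err != nil {
		return err
	}
	return r.Close()
}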
From d6e629a4afdee7e6ac0c9a57b953323f16730264 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Peter=20S=CC=8Ctibrany=CC=81?=
Date: Fri, 15 May 2020 15:44:41 +0200
Subject: [PATCH 25/25] Removed unused function.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Peter Štibraný
---
 pkg/store/cache/caching_bucket_factory.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/pkg/store/cache/caching_bucket_factory.go b/pkg/store/cache/caching_bucket_factory.go
index 4a461bb382..3dbd60276e 100644
--- a/pkg/store/cache/caching_bucket_factory.go
+++ b/pkg/store/cache/caching_bucket_factory.go
@@ -119,10 +119,6 @@ func isMetaFile(name string) bool {
 	return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename)
 }
 
-func isIndexFile(name string) bool {
-	return strings.HasSuffix(name, "/index")
-}
-
 func isBlocksRootDir(name string) bool {
 	return name == ""
 }
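For completeness, a hedged sketch of tuning the new size cap when CachingWithBackendConfig is filled from Go instead of YAML; only fields introduced in this series are touched, and the helper name is hypothetical:

// exampleConfigWithSmallerCap starts from the factory defaults and lowers
// the metafile size cap.
func exampleConfigWithSmallerCap() CachingWithBackendConfig {
	cfg := CachingWithBackendConfig{}
	cfg.Defaults()
	// Cache metafiles up to 256 KiB instead of the 1 MiB default (which
	// matches the memcached client's default MaxItemSize, per Defaults()).
	cfg.MetafileMaxSize = 256 * 1024
	return cfg
}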