Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix lazy postings with zero length #7083

Merged
merged 4 commits into from Jan 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.md
Expand Up @@ -12,6 +12,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re

### Fixed

- [#7083](https://github.com/thanos-io/thanos/pull/7083) Store Gateway: Fix zero-length lazy expanded postings failing to be cached.

### Added

### Changed
Expand Down
17 changes: 10 additions & 7 deletions pkg/store/bucket.go
Expand Up @@ -1163,13 +1163,16 @@ func (b *blockSeriesClient) nextBatch(tenant string) error {
if len(postingsBatch) == 0 {
b.hasMorePostings = false
if b.lazyPostings.lazyExpanded() {
v, err := b.indexr.IndexVersion()
if err != nil {
return errors.Wrap(err, "get index version")
}
if v >= 2 {
for i := range b.expandedPostings {
b.expandedPostings[i] = b.expandedPostings[i] / 16
// No need to fetch index version again if lazy posting has 0 length.
if len(b.lazyPostings.postings) > 0 {
v, err := b.indexr.IndexVersion()
if err != nil {
return errors.Wrap(err, "get index version")
}
if v >= 2 {
for i := range b.expandedPostings {
b.expandedPostings[i] = b.expandedPostings[i] / 16
}
}
}
b.indexr.storeExpandedPostingsToCache(b.blockMatchers, index.NewListPostings(b.expandedPostings), len(b.expandedPostings), tenant)
Expand Down
35 changes: 35 additions & 0 deletions pkg/store/bucket_test.go
Expand Up @@ -1282,6 +1282,41 @@ func TestExpandedPostingsEmptyPostings(t *testing.T) {
testutil.Equals(t, 1, indexr.stats.cachedPostingsCompressions)
}

// TestLazyExpandedPostingsEmptyPostings verifies that when lazy expanded
// postings resolve to zero series, ExpandedPostings returns the shared
// emptyLazyPostings sentinel (so the empty result can be cached) rather
// than a non-empty *lazyExpandedPostings with zero-length postings.
func TestLazyExpandedPostingsEmptyPostings(t *testing.T) {
	tmpDir := t.TempDir()

	bkt, err := filesystem.NewBucket(filepath.Join(tmpDir, "bkt"))
	testutil.Ok(t, err)
	defer func() { testutil.Ok(t, bkt.Close()) }()

	id := uploadTestBlock(t, tmpDir, bkt, 100)

	r, err := indexheader.NewBinaryReader(context.Background(), log.NewNopLogger(), bkt, tmpDir, id, DefaultPostingOffsetInMemorySampling, indexheader.NewBinaryReaderMetrics(nil))
	testutil.Ok(t, err)
	b := &bucketBlock{
		logger:                 log.NewNopLogger(),
		metrics:                newBucketStoreMetrics(nil),
		indexHeaderReader:      r,
		indexCache:             noopCache{},
		bkt:                    bkt,
		meta:                   &metadata.Meta{BlockMeta: tsdb.BlockMeta{ULID: id}},
		partitioner:            NewGapBasedPartitioner(PartitionerMaxGapSize),
		estimatedMaxSeriesSize: 20,
	}

	indexr := newBucketIndexReader(b)
	// matcher1 and matcher2 will match nothing after intersection, while
	// matcher3 keeps lazy expanded postings enabled for the query.
	matcher1 := labels.MustNewMatcher(labels.MatchEqual, "j", "foo")
	matcher2 := labels.MustNewMatcher(labels.MatchRegexp, "n", "1_.*")
	matcher3 := labels.MustNewMatcher(labels.MatchRegexp, "i", ".+")
	ctx := context.Background()
	dummyCounter := promauto.With(prometheus.NewRegistry()).NewCounter(prometheus.CounterOpts{Name: "test"})
	ps, err := indexr.ExpandedPostings(ctx, newSortedMatchers([]*labels.Matcher{matcher1, matcher2, matcher3}), NewBytesLimiterFactory(0)(nil), true, dummyCounter, tenancy.DefaultTenant)
	testutil.Ok(t, err)
	// We expect emptyLazyPostings rather than lazy postings with 0 length but with matchers.
	// Note: testutil.Equals takes (t, expected, actual) — keep the sentinel first
	// so a failure message labels the values correctly.
	testutil.Equals(t, emptyLazyPostings, ps)
}

func TestBucketSeries(t *testing.T) {
tb := testutil.NewTB(t)
storetestutil.RunSeriesInterestingCases(tb, 200e3, 200e3, func(t testutil.TB, samplesPerSeries, series int) {
Expand Down
3 changes: 3 additions & 0 deletions pkg/store/lazy_postings.go
Expand Up @@ -184,6 +184,9 @@ func fetchLazyExpandedPostings(
if err != nil {
return nil, err
}
if len(ps) == 0 {
return emptyLazyPostings, nil
}
return &lazyExpandedPostings{postings: ps, matchers: matchers}, nil
}

Expand Down