diff --git a/CHANGELOG.md b/CHANGELOG.md index 0235f027c8..54c3e029c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ We use *breaking :warning:* word for marking changes that are not backward compa * [#3095](https://github.com/thanos-io/thanos/pull/3095) Rule: update manager when all rule files are removed. * [#3098](https://github.com/thanos-io/thanos/pull/3098) ui: Fix Block Viewer for Compactor and Store * [#3105](https://github.com/thanos-io/thanos/pull/3105) Query: Fix overwriting maxSourceResolution when auto downsampling is enabled. +* [#3010](https://github.com/thanos-io/thanos/pull/3010) Querier: Added a flag to override the default lookback delta in PromQL. The flag should be set to at least 2 times the slowest scrape interval or left unset to use the Prometheus default of 5m. --- sse_config: diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index d5060fae99..20a8803f72 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -70,6 +70,8 @@ func registerQuery(app *extkingpin.App) { maxConcurrentQueries := cmd.Flag("query.max-concurrent", "Maximum number of queries processed concurrently by query node."). Default("20").Int() + lookbackDelta := cmd.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations. PromQL always evaluates the query for the certain timestamp (query range timestamps are deduced by step). Since scrape intervals might be different, PromQL looks back for given amount of time to get latest sample. If it exceeds the maximum lookback delta it assumes series is stale and returns none (a gap). This is why lookback delta should be set to at least 2 times of the slowest scrape interval. If unset it will use the promql default of 5m").Duration() + maxConcurrentSelects := cmd.Flag("query.max-concurrent-select", "Maximum number of select requests made concurrently per a query."). 
Default("4").Int() @@ -174,6 +176,7 @@ func registerQuery(app *extkingpin.App) { *maxConcurrentQueries, *maxConcurrentSelects, time.Duration(*queryTimeout), + *lookbackDelta, time.Duration(*defaultEvaluationInterval), time.Duration(*storeResponseTimeout), *queryReplicaLabels, @@ -221,6 +224,7 @@ func runQuery( maxConcurrentQueries int, maxConcurrentSelects int, queryTimeout time.Duration, + lookbackDelta time.Duration, defaultEvaluationInterval time.Duration, storeResponseTimeout time.Duration, queryReplicaLabels []string, @@ -311,6 +315,7 @@ func runQuery( NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 { return defaultEvaluationInterval.Milliseconds() }, + LookbackDelta: lookbackDelta, }, ) ) diff --git a/docs/components/query.md b/docs/components/query.md index 1ef59c1f67..f676e3695c 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -367,6 +367,19 @@ Flags: --query.timeout=2m Maximum time to process query by query node. --query.max-concurrent=20 Maximum number of queries processed concurrently by query node. + --query.lookback-delta=QUERY.LOOKBACK-DELTA + The maximum lookback duration for retrieving + metrics during expression evaluations. PromQL + always evaluates the query for the certain + timestamp (query range timestamps are deduced + by step). Since scrape intervals might be + different, PromQL looks back for given amount + of time to get latest sample. If it exceeds the + maximum lookback delta it assumes series is + stale and returns none (a gap). This is why + lookback delta should be set to at least 2 + times of the slowest scrape interval. If unset + it will use the promql default of 5m --query.max-concurrent-select=4 Maximum number of select requests made concurrently per a query. 
diff --git a/pkg/query/iter.go b/pkg/query/iter.go index 901aba0f7f..ff2676dab9 100644 --- a/pkg/query/iter.go +++ b/pkg/query/iter.go @@ -302,7 +302,7 @@ type chunkSeriesIterator struct { func newChunkSeriesIterator(cs []chunkenc.Iterator) chunkenc.Iterator { if len(cs) == 0 { // This should not happen. StoreAPI implementations should not send empty results. - return errSeriesIterator{} + return errSeriesIterator{err: errors.Errorf("store returned an empty result")} } return &chunkSeriesIterator{chunks: cs} } @@ -503,7 +503,8 @@ func (it noopAdjustableSeriesIterator) adjustAtValue(float64) {} // Replica 1 counter scrapes: 20 30 40 Nan - 0 5 // Replica 2 counter scrapes: 25 35 45 Nan - 2 // -// Now for downsampling purposes we are accounting the resets so our replicas before going to dedup iterator looks like this: +// Now for downsampling purposes we are accounting the resets(rewriting the samples value) +// so our replicas before going to dedup iterator looks like this: // // Replica 1 counter total: 20 30 40 - - 40 45 // Replica 2 counter total: 25 35 45 - - 47 @@ -648,7 +649,7 @@ func (it *dedupSeriesIterator) Seek(t int64) bool { // Don't use underlying Seek, but iterate over next to not miss gaps. for { ts, _ := it.At() - if ts > 0 && ts >= t { + if ts >= t { return true } if !it.Next() {