diff --git a/CHANGELOG.md b/CHANGELOG.md index e33e5652704..5b7dac9be55 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ We use *breaking :warning:* word for marking changes that are not backward compa * [#3095](https://github.com/thanos-io/thanos/pull/3095) Rule: update manager when all rule files are removed. * [#3098](https://github.com/thanos-io/thanos/pull/3098) ui: Fix Block Viewer for Compactor and Store * [#3105](https://github.com/thanos-io/thanos/pull/3105) Query: Fix overwriting maxSourceResolution when auto downsampling is enabled. +* [#3010](https://github.com/thanos-io/thanos/pull/3010) Querier: Added a flag to override the default lookback delta in PromQL. The flag should be set to at least 2 times the slowest scrape interval. ## [v0.15.0-rc.0](https://github.com/thanos-io/thanos/releases/tag/v0.15.0-rc.0) - 2020.08.26 diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index f967d8538ca..a66435bc4cf 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -70,6 +70,8 @@ func registerQuery(m map[string]setupFunc, app *kingpin.Application) { maxConcurrentQueries := cmd.Flag("query.max-concurrent", "Maximum number of queries processed concurrently by query node."). Default("20").Int() + lookbackDelta := cmd.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations. For example with a lookback of 7min and a scrape interval of 10min, PromQL will fill in the gap only for 7min and will show the other 3min as a gap. Should be set to at least 2 times the slowest scrape interval.").Duration() + maxConcurrentSelects := cmd.Flag("query.max-concurrent-select", "Maximum number of select requests made concurrently per a query."). 
Default("4").Int() @@ -175,6 +177,7 @@ func registerQuery(m map[string]setupFunc, app *kingpin.Application) { *maxConcurrentQueries, *maxConcurrentSelects, time.Duration(*queryTimeout), + *lookbackDelta, time.Duration(*defaultEvaluationInterval), time.Duration(*storeResponseTimeout), *queryReplicaLabels, @@ -222,6 +225,7 @@ func runQuery( maxConcurrentQueries int, maxConcurrentSelects int, queryTimeout time.Duration, + lookbackDelta time.Duration, defaultEvaluationInterval time.Duration, storeResponseTimeout time.Duration, queryReplicaLabels []string, @@ -312,6 +316,7 @@ func runQuery( NoStepSubqueryIntervalFn: func(rangeMillis int64) int64 { return defaultEvaluationInterval.Milliseconds() }, + LookbackDelta: lookbackDelta, }, ) ) diff --git a/docs/components/query.md b/docs/components/query.md index 1ef59c1f67f..8acfa7c657b 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -367,6 +367,14 @@ Flags: --query.timeout=2m Maximum time to process query by query node. --query.max-concurrent=20 Maximum number of queries processed concurrently by query node. + --query.lookback-delta=QUERY.LOOKBACK-DELTA + The maximum lookback duration for retrieving + metrics during expression evaluations. For + example with a lookback of 7min and a scrape + interval of 10min, PromQL will fill in the gap + only for 7min and will show the other 3min as a + gap. Should be set to at least 2 times the + slowest scrape interval. --query.max-concurrent-select=4 Maximum number of select requests made concurrently per a query. diff --git a/pkg/query/iter.go b/pkg/query/iter.go index 901aba0f7f3..ff2676dab9f 100644 --- a/pkg/query/iter.go +++ b/pkg/query/iter.go @@ -302,7 +302,7 @@ type chunkSeriesIterator struct { func newChunkSeriesIterator(cs []chunkenc.Iterator) chunkenc.Iterator { if len(cs) == 0 { // This should not happen. StoreAPI implementations should not send empty results. 
- return errSeriesIterator{} + return errSeriesIterator{err: errors.Errorf("store returned an empty result")} } return &chunkSeriesIterator{chunks: cs} } @@ -503,7 +503,8 @@ func (it noopAdjustableSeriesIterator) adjustAtValue(float64) {} // Replica 1 counter scrapes: 20 30 40 Nan - 0 5 // Replica 2 counter scrapes: 25 35 45 Nan - 2 // -// Now for downsampling purposes we are accounting the resets so our replicas before going to dedup iterator looks like this: +// Now for downsampling purposes we are accounting for the resets (rewriting the sample values) +// so our replicas before going to the dedup iterator look like this: // Replica 1 counter total: 20 30 40 - - 40 45 // Replica 2 counter total: 25 35 45 - - 47 @@ -648,7 +649,7 @@ func (it *dedupSeriesIterator) Seek(t int64) bool { // Don't use underlying Seek, but iterate over next to not miss gaps. for { ts, _ := it.At() - if ts > 0 && ts >= t { + if ts >= t { return true } if !it.Next() {