Skip to content

Commit

Permalink
Fix returned status code on max samples limit hit when query sharding is enabled (#499)
Browse files Browse the repository at this point in the history

Signed-off-by: Marco Pracucci <marco@pracucci.com>
  • Loading branch information
pracucci committed Nov 16, 2021
1 parent c61b0bf commit c0cf908
Show file tree
Hide file tree
Showing 4 changed files with 31 additions and 11 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@
* [BUGFIX] Ingester: don't create TSDB or appender if no samples are sent by a tenant. #162
* [BUGFIX] Alertmanager: don't replace user configurations with blank fallback configurations (when enabled), particularly during scaling up/down instances when sharding is enabled. #224
* [BUGFIX] Query-tee: Ensure POST requests are handled correctly #286
* [BUGFIX] Query-frontend: Ensure query_range requests handled by the query-frontend return JSON formatted errors. #360
* [BUGFIX] Query-frontend: Ensure query_range requests handled by the query-frontend return JSON formatted errors. #360 #499
* [BUGFIX] Query-frontend: don't reuse cached results for queries that are not step-aligned. #424
* [BUGFIX] Querier: fixed UserStats endpoint. When zone-aware replication is enabled, `MaxUnavailableZones` param is used instead of `MaxErrors`, so setting `MaxErrors = 0` doesn't make the Querier wait for all Ingesters responses. #474

Expand Down
30 changes: 27 additions & 3 deletions integration/query_frontend_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -337,6 +337,7 @@ func TestQueryFrontendErrorMessageParity(t *testing.T) {
"-querier.cache-results": "true",
"-querier.split-queries-by-interval": "24h",
"-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range
"-querier.max-samples": "2", // Very low limit so that we can easily hit it.
})
consul := e2edb.NewConsul()
require.NoError(t, s.StartAndWaitReady(consul))
Expand All @@ -347,22 +348,37 @@ func TestQueryFrontendErrorMessageParity(t *testing.T) {
flags["-querier.frontend-address"] = queryFrontend.NetworkGRPCEndpoint()

// Start all other services.
distributor := e2emimir.NewDistributorWithConfigFile("distributor", consul.NetworkHTTPEndpoint(), configFile, flags, "")
ingester := e2emimir.NewIngesterWithConfigFile("ingester", consul.NetworkHTTPEndpoint(), configFile, flags, "")
querier := e2emimir.NewQuerierWithConfigFile("querier", consul.NetworkHTTPEndpoint(), configFile, flags, "")

require.NoError(t, s.StartAndWaitReady(querier, ingester))
require.NoError(t, s.StartAndWaitReady(distributor, querier, ingester))
require.NoError(t, s.WaitReady(queryFrontend))

// Wait until both the distributor and querier have updated the ring.
require.NoError(t, distributor.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))
require.NoError(t, querier.WaitSumMetrics(e2e.Equals(512), "cortex_ring_tokens_total"))

now := time.Now()

// Push some series.
cWrite, err := e2emimir.NewClient(distributor.HTTPEndpoint(), "", "", "", "fake")
require.NoError(t, err)

for i := 0; i < 10; i++ {
series, _ := generateSeries(fmt.Sprintf("series_%d", i), now)

res, err := cWrite.Push(series)
require.NoError(t, err)
require.Equal(t, 200, res.StatusCode)
}

cQueryFrontend, err := e2emimir.NewClient("", queryFrontend.HTTPEndpoint(), "", "", "fake")
require.NoError(t, err)

cQuerier, err := e2emimir.NewClient("", querier.HTTPEndpoint(), "", "", "fake")
require.NoError(t, err)

now := time.Now()

for _, tc := range []struct {
name string
query func(*e2emimir.Client) (*http.Response, []byte, error)
Expand Down Expand Up @@ -439,6 +455,14 @@ func TestQueryFrontendErrorMessageParity(t *testing.T) {
expStatusCode: http.StatusBadRequest,
expBody: `{"error":"invalid parameter \"start\": cannot parse \"depths-of-time\" to a valid timestamp", "errorType":"bad_data", "status":"error"}`,
},
{
name: "max samples limit hit",
query: func(c *e2emimir.Client) (*http.Response, []byte, error) {
return c.QueryRangeRaw(`{__name__=~"series_.+"}`, now.Add(-time.Minute), now, time.Minute)
},
expStatusCode: http.StatusUnprocessableEntity,
expBody: `{"error":"query processing would load too many samples into memory in query execution", "errorType":"execution", "status":"error"}`,
},
} {
t.Run(tc.name, func(t *testing.T) {
resp, body, err := tc.query(cQuerier)
Expand Down
2 changes: 2 additions & 0 deletions pkg/querier/queryrange/querysharding.go
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,8 @@ func mapEngineError(err error) error {
errorType = apierror.TypeTimeout
case promql.ErrStorage:
errorType = apierror.TypeInternal
case promql.ErrTooManySamples:
errorType = apierror.TypeExec
}

return apierror.New(errorType, err.Error())
Expand Down
8 changes: 1 addition & 7 deletions pkg/querier/queryrange/querysharding_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -814,7 +814,7 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) {
name: "downstream - sample limit",
engineDownstream: engineSampleLimit,
engineSharding: engine,
expError: apierror.New(apierror.TypeInternal, "query processing would load too many samples into memory in query execution"),
expError: apierror.New(apierror.TypeExec, "query processing would load too many samples into memory in query execution"),
},
{
name: "sharding - timeout",
Expand All @@ -823,12 +823,6 @@ func TestQuerySharding_ShouldReturnErrorInCorrectFormat(t *testing.T) {
expError: apierror.New(apierror.TypeTimeout, "query timed out in expression evaluation"),
queryable: queryableSlow,
},
{
name: "downstream - sample limit",
engineDownstream: engine,
engineSharding: engineSampleLimit,
expError: apierror.New(apierror.TypeInternal, "query processing would load too many samples into memory in query execution"),
},
{
name: "downstream - storage",
engineDownstream: engine,
Expand Down

0 comments on commit c0cf908

Please sign in to comment.