-
Notifications
You must be signed in to change notification settings - Fork 16
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.
Already on GitHub? Sign in to your account
OCPBUGS-18971: limit number of simultaneous client requests #76
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
/* | ||
Copyright 2017 The Kubernetes Authors. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package client | ||
|
||
import ( | ||
"context" | ||
"net/url" | ||
|
||
"k8s.io/component-base/metrics" | ||
"k8s.io/component-base/metrics/legacyregistry" | ||
) | ||
|
||
// maxConcurrentRequests caps the number of simultaneous requests sent to the
// Prometheus API.
//
// Without a limit, the adapter could flood the Prometheus API: a query for
// pod metrics across all namespaces translates into (2 x the number of
// namespaces) Prometheus API queries. In the worst case the Prometheus pods
// can exhaust the kernel's listening-socket backlog (e.g. SOMAXCONN), causing
// timed-out requests from other clients — in particular, Kubelet liveness
// probes can be reported as down, triggering Prometheus pod restarts.
// The value was chosen from empirical data.
const maxConcurrentRequests = 100
var ( | ||
inflightRequests = metrics.NewGauge( | ||
&metrics.GaugeOpts{ | ||
Namespace: "prometheus_adapter", | ||
Subsystem: "prometheus_client", | ||
Name: "inflight_requests", | ||
Help: "Number of inflight requests to the Prometheus service", | ||
}) | ||
|
||
maxRequests = metrics.NewGauge( | ||
&metrics.GaugeOpts{ | ||
Namespace: "prometheus_adapter", | ||
Subsystem: "prometheus_client", | ||
Name: "max_requests", | ||
Help: "Maximum number of requests to the Prometheus service", | ||
}) | ||
) | ||
|
||
func init() { | ||
legacyregistry.MustRegister(inflightRequests, maxRequests) | ||
maxRequests.Set(maxConcurrentRequests) | ||
} | ||
|
||
type requestLimitClient struct { | ||
c GenericAPIClient | ||
inflight chan struct{} | ||
} | ||
|
||
func newRequestLimiter(c GenericAPIClient) GenericAPIClient { | ||
return &requestLimitClient{ | ||
c: c, | ||
inflight: make(chan struct{}, maxConcurrentRequests), | ||
} | ||
} | ||
|
||
func (c *requestLimitClient) Do(ctx context.Context, verb, endpoint string, query url.Values) (APIResponse, error) { | ||
select { | ||
case c.inflight <- struct{}{}: | ||
inflightRequests.Inc() | ||
defer func() { | ||
inflightRequests.Dec() | ||
<-c.inflight | ||
}() | ||
case <-ctx.Done(): | ||
return APIResponse{}, ctx.Err() | ||
} | ||
|
||
return c.c.Do(ctx, verb, endpoint, query) | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,118 @@ | ||
/* | ||
Copyright 2017 The Kubernetes Authors. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package client | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"net/http" | ||
"net/http/httptest" | ||
"net/url" | ||
"sync" | ||
"sync/atomic" | ||
"testing" | ||
"time" | ||
) | ||
|
||
func TestRequestLimitClient(t *testing.T) { | ||
var ( | ||
ctx = context.Background() | ||
total atomic.Int64 | ||
unblock = make(chan struct{}) | ||
) | ||
|
||
srvCtx, srvCancel := context.WithCancel(ctx) | ||
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { | ||
total.Add(1) | ||
|
||
w.Write([]byte("{}")) | ||
if r.URL.Path == "/nonblocking" { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Knowing that There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. To ensure that the test catches the issue when/if There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. without the |
||
return | ||
} | ||
|
||
// Requests will be blocked until the test closes the unblock channel or the test fails. | ||
select { | ||
case <-unblock: | ||
case <-srvCtx.Done(): | ||
} | ||
})) | ||
defer func() { | ||
srvCancel() | ||
srv.Close() | ||
}() | ||
|
||
// Make as many requests as the max allowed number + 1. | ||
var ( | ||
wg sync.WaitGroup | ||
errChan = make(chan error, maxConcurrentRequests+1) | ||
u, _ = url.Parse(srv.URL) | ||
c = NewGenericAPIClient(&http.Client{}, u, nil) | ||
do = func(i int) { | ||
defer wg.Done() | ||
|
||
_, err := c.Do(ctx, "GET", "/", nil) | ||
if err != nil { | ||
err = fmt.Errorf("request #%d: %w", i, err) | ||
} | ||
errChan <- err | ||
} | ||
) | ||
for i := 0; i < maxConcurrentRequests; i++ { | ||
wg.Add(1) | ||
go func(i int) { | ||
do(i) | ||
}(i) | ||
} | ||
|
||
// Wait for the first maxConcurrentRequests requests to hit the server. | ||
for total.Load() != maxConcurrentRequests { | ||
} | ||
|
||
// Make one more request which should be blocked at the client level. | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just for my edification, why do the 101st request separately? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. because it's easier to understand what the test does + L82 ensures that the first 100 requests have hit the server. |
||
wg.Add(1) | ||
go func() { | ||
do(maxConcurrentRequests) | ||
}() | ||
|
||
// Make one more request which should be canceled before hitting the server. | ||
ctx2, _ := context.WithTimeout(ctx, time.Second) | ||
_, err := c.Do(ctx2, "GET", "/nonblocking", nil) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. should we check that it returns an There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. since it returns an error, the first returned value is irrelevant IMHO. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Just a safeguard, in case the receiver doesn't start by checking the error. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. not sure to understand :) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we assume the function calling this |
||
switch { | ||
case err == nil: | ||
t.Fatal("expected request to fail") | ||
case ctx2.Err() == nil: | ||
t.Fatal("expected request to timeout") | ||
} | ||
|
||
if total.Load() != maxConcurrentRequests { | ||
t.Fatalf("expected %d requests on the server side, got %d", maxConcurrentRequests, total.Load()) | ||
} | ||
|
||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. we can also add a check There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it should be detected by the Do() request returning without error. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The client/Do function may be faulty, it's better to check from server side as well. (+ check that the There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. it spawns maxConcurrentRequests + 1 = 101 (for i := 0; i < maxConcurrentRequests+1; i++) IIUC. |
||
// Release all inflight requests. | ||
close(unblock) | ||
|
||
// Wait for all requests to complete. | ||
wg.Wait() | ||
|
||
// Check that no error was returned. | ||
close(errChan) | ||
for err := range errChan { | ||
if err != nil { | ||
t.Fatalf("unexpected error: %s", err) | ||
} | ||
} | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
no power of 2, I'm disappointed :)
At worst a Prometheus pod will serve both prom-adapter (if the other is down or LB missed it) or even 3 or 4 in some weird rolling scenarios which results in 4XX connections < --web.max-connections=512, which leaves room in that queue and the other ones for the other clients.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
While testing, I've realized that even with 2 prometheus adapter pods and 2 Prometheus pods running, it can be that all adapter requests go to the same Prometheus because the service is configured with client IP affinity.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yes, had some similar cases as well while testing, didn't dig into how LB is set.
I think 100 is a good value.