maxinflight.go

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package filters

import (
	"fmt"
	"net/http"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/endpoints/metrics"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
	fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics"
	"k8s.io/klog/v2"
)

const (
	// retryAfter is the value of the Retry-After response header (in seconds)
	// sent with rate-limited responses.
	// TODO: maybe make this dynamic? or user-adjustable?
	retryAfter = "1"

	// inflightUsageMetricUpdatePeriod is how often the inflight usage metric
	// should be updated. Because the metric tracks the maximal value over the
	// period, making this longer will increase the reported value.
	inflightUsageMetricUpdatePeriod = time.Second

	// observationMaintenancePeriod is how often to run maintenance on
	// observations to ensure that they do not fall too far behind.
	observationMaintenancePeriod = 10 * time.Second
)

var nonMutatingRequestVerbs = sets.NewString("get", "list", "watch")

// handleError returns an Internal Server Error to the client and logs the
// underlying error.
func handleError(w http.ResponseWriter, r *http.Request, err error) {
	errorMsg := fmt.Sprintf("Internal Server Error: %#v", r.RequestURI)
	http.Error(w, errorMsg, http.StatusInternalServerError)
	klog.Error(err)
}

// requestWatermark is used to track maximal numbers of requests in a particular phase of handling.
type requestWatermark struct {
	phase                                string
	readOnlyObserver, mutatingObserver   fcmetrics.TimedObserver
	lock                                 sync.Mutex
	readOnlyWatermark, mutatingWatermark int
}

// recordMutating feeds the current mutating concurrency level to the observer
// and raises the mutating watermark if the level exceeds it.
func (w *requestWatermark) recordMutating(mutatingVal int) {
	w.mutatingObserver.Set(float64(mutatingVal))

	w.lock.Lock()
	defer w.lock.Unlock()

	if w.mutatingWatermark < mutatingVal {
		w.mutatingWatermark = mutatingVal
	}
}

// recordReadOnly does the same for the read-only concurrency level and watermark.
func (w *requestWatermark) recordReadOnly(readOnlyVal int) {
	w.readOnlyObserver.Set(float64(readOnlyVal))

	w.lock.Lock()
	defer w.lock.Unlock()

	if w.readOnlyWatermark < readOnlyVal {
		w.readOnlyWatermark = readOnlyVal
	}
}

// watermark tracks requests being executed (not waiting in a queue).
var watermark = &requestWatermark{
	phase:            metrics.ExecutingPhase,
	readOnlyObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.ReadOnlyKind}).RequestsExecuting,
	mutatingObserver: fcmetrics.ReadWriteConcurrencyObserverPairGenerator.Generate(1, 1, []string{metrics.MutatingKind}).RequestsExecuting,
}

// startWatermarkMaintenance starts the goroutines to observe and maintain the specified watermark.
func startWatermarkMaintenance(watermark *requestWatermark, stopCh <-chan struct{}) {
	// Periodically update the inflight usage metric.
	go wait.Until(func() {
		watermark.lock.Lock()
		readOnlyWatermark := watermark.readOnlyWatermark
		mutatingWatermark := watermark.mutatingWatermark
		watermark.readOnlyWatermark = 0
		watermark.mutatingWatermark = 0
		watermark.lock.Unlock()

		metrics.UpdateInflightRequestMetrics(watermark.phase, readOnlyWatermark, mutatingWatermark)
	}, inflightUsageMetricUpdatePeriod, stopCh)

	// Periodically observe the watermarks to ensure that they do not fall too far
	// behind. If they do, the next request to arrive incurs a long delay while the
	// observer catches back up.
	go wait.Until(func() {
		watermark.readOnlyObserver.Add(0)
		watermark.mutatingObserver.Add(0)
	}, observationMaintenancePeriod, stopCh)
}

// WithMaxInFlightLimit limits the number of in-flight requests to the buffer
// size of the channel created for each class of request (non-mutating and
// mutating); a limit of 0 disables limiting for that class.
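//
// A minimal wiring sketch (illustrative only; the handler variable and the
// limits shown are assumptions, not defaults defined in this file):
//
//	var apiHandler http.Handler // the rest of the filter chain
//	limited := WithMaxInFlightLimit(
//		apiHandler,
//		400, // non-mutating (get/list/watch) limit
//		200, // mutating limit
//		nil, // no long-running request check, for brevity
//	)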
func WithMaxInFlightLimit(
	handler http.Handler,
	nonMutatingLimit int,
	mutatingLimit int,
	longRunningRequestCheck apirequest.LongRunningRequestCheck,
) http.Handler {
	if nonMutatingLimit == 0 && mutatingLimit == 0 {
		return handler
	}
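
	// Each buffered channel acts as a counting semaphore: a successful
	// non-blocking send acquires an in-flight slot and a receive releases it.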
	var nonMutatingChan chan bool
	var mutatingChan chan bool

	if nonMutatingLimit != 0 {
		nonMutatingChan = make(chan bool, nonMutatingLimit)
		watermark.readOnlyObserver.SetX1(float64(nonMutatingLimit))
	}
	if mutatingLimit != 0 {
		mutatingChan = make(chan bool, mutatingLimit)
		watermark.mutatingObserver.SetX1(float64(mutatingLimit))
	}

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := r.Context()
		requestInfo, ok := apirequest.RequestInfoFrom(ctx)
		if !ok {
			handleError(w, r, fmt.Errorf("no RequestInfo found in context, handler chain must be wrong"))
			return
		}

		// Skip tracking long-running requests.
		if longRunningRequestCheck != nil && longRunningRequestCheck(r, requestInfo) {
			handler.ServeHTTP(w, r)
			return
		}

		var c chan bool
		isMutatingRequest := !nonMutatingRequestVerbs.Has(requestInfo.Verb)
		if isMutatingRequest {
			c = mutatingChan
		} else {
			c = nonMutatingChan
		}
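
		// A nil channel means no limit was configured for this class of
		// request; such requests pass through untracked.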
		if c == nil {
			handler.ServeHTTP(w, r)
		} else {
			select {
			case c <- true:
				// We note the concurrency level both while the
				// request is being served and after it is done being
				// served, because both states contribute to the
				// sampled stats on concurrency.
				if isMutatingRequest {
					watermark.recordMutating(len(c))
				} else {
					watermark.recordReadOnly(len(c))
				}
				defer func() {
					<-c
					if isMutatingRequest {
						watermark.recordMutating(len(c))
					} else {
						watermark.recordReadOnly(len(c))
					}
				}()
				handler.ServeHTTP(w, r)

			default:
				// At this point we're about to return a 429, but not all actors should be rate limited.
				// A member of system:masters (a super-admin or a loopback connection) is so powerful
				// that it should always get an answer.
				if currUser, ok := apirequest.UserFrom(ctx); ok {
					for _, group := range currUser.GetGroups() {
						if group == user.SystemPrivilegedGroup {
							handler.ServeHTTP(w, r)
							return
						}
					}
				}

				// We need to split this data between buckets used for throttling.
				if isMutatingRequest {
					metrics.DroppedRequests.WithLabelValues(metrics.MutatingKind).Inc()
				} else {
					metrics.DroppedRequests.WithLabelValues(metrics.ReadOnlyKind).Inc()
				}
				metrics.RecordRequestTermination(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
				tooManyRequests(r, w)
			}
		}
	})
}

// StartMaxInFlightWatermarkMaintenance starts the goroutines to observe and maintain watermarks for
// max-in-flight requests.
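//
// A minimal start-up sketch (illustrative; the stop channel's lifecycle is an
// assumption, not part of this file):
//
//	stopCh := make(chan struct{})
//	defer close(stopCh)
//	StartMaxInFlightWatermarkMaintenance(stopCh)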
func StartMaxInFlightWatermarkMaintenance(stopCh <-chan struct{}) {
	startWatermarkMaintenance(watermark, stopCh)
}

func tooManyRequests(req *http.Request, w http.ResponseWriter) {
	// Return a 429 status indicating "Too Many Requests".
	w.Header().Set("Retry-After", retryAfter)
	http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests)
}