-
Notifications
You must be signed in to change notification settings - Fork 38.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Rate limiting requeue #24052
Merged
k8s-github-robot
merged 1 commit into
kubernetes:master
from
deads2k:rate-limitting-requeue
May 14, 2016
Merged
Rate limiting requeue #24052
Changes from all commits
Commits
File filter
Filter by extension
Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,204 @@ | ||
/* | ||
Copyright 2016 The Kubernetes Authors All rights reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package workqueue | ||
|
||
import ( | ||
"math" | ||
"sync" | ||
"time" | ||
|
||
"github.com/juju/ratelimit" | ||
) | ||
|
||
// RateLimiter is consulted by a workqueue to decide how long a failing item
// should wait before being requeued, and tracks per-item retry counts.
type RateLimiter interface {
	// When gets an item and gets to decide how long that item should wait
	When(item interface{}) time.Duration
	// Forget indicates that an item is finished being retried. It doesn't
	// matter whether it's for permanent failure or for success; either way
	// we'll stop tracking it.
	Forget(item interface{})
	// NumRequeues returns back how many failures the item has had
	NumRequeues(item interface{}) int
}
|
||
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has | ||
// both overall and per-item rate limitting. The overall is a token bucket and the per-item is exponential | ||
func DefaultControllerRateLimiter() RateLimiter { | ||
return NewMaxOfRateLimiter( | ||
DefaultItemBasedRateLimiter(), | ||
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item) | ||
&BucketRateLimiter{Bucket: ratelimit.NewBucketWithRate(float64(10), int64(100))}, | ||
) | ||
} | ||
|
||
// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API | ||
type BucketRateLimiter struct { | ||
*ratelimit.Bucket | ||
} | ||
|
||
var _ RateLimiter = &BucketRateLimiter{} | ||
|
||
func (r *BucketRateLimiter) When(item interface{}) time.Duration { | ||
return r.Bucket.Take(1) | ||
} | ||
|
||
func (r *BucketRateLimiter) NumRequeues(item interface{}) int { | ||
return 0 | ||
} | ||
|
||
func (r *BucketRateLimiter) Forget(item interface{}) { | ||
} | ||
|
||
// ItemExponentialFailureRateLimiter does a simple baseDelay*10^<num-failures> limit | ||
// dealing with max failures and expiration are up to the caller | ||
type ItemExponentialFailureRateLimiter struct { | ||
failuresLock sync.Mutex | ||
failures map[interface{}]int | ||
|
||
baseDelay time.Duration | ||
maxDelay time.Duration | ||
} | ||
|
||
var _ RateLimiter = &ItemExponentialFailureRateLimiter{} | ||
|
||
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter { | ||
return &ItemExponentialFailureRateLimiter{ | ||
failures: map[interface{}]int{}, | ||
baseDelay: baseDelay, | ||
maxDelay: maxDelay, | ||
} | ||
} | ||
|
||
func DefaultItemBasedRateLimiter() RateLimiter { | ||
return NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1000*time.Second) | ||
} | ||
|
||
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
r.failures[item] = r.failures[item] + 1 | ||
|
||
calculated := r.baseDelay * time.Duration(math.Pow10(r.failures[item]-1)) | ||
if calculated > r.maxDelay { | ||
return r.maxDelay | ||
} | ||
|
||
return calculated | ||
} | ||
|
||
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
return r.failures[item] | ||
} | ||
|
||
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
delete(r.failures, item) | ||
} | ||
|
||
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that | ||
type ItemFastSlowRateLimiter struct { | ||
failuresLock sync.Mutex | ||
failures map[interface{}]int | ||
|
||
maxFastAttempts int | ||
fastDelay time.Duration | ||
slowDelay time.Duration | ||
} | ||
|
||
var _ RateLimiter = &ItemFastSlowRateLimiter{} | ||
|
||
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter { | ||
return &ItemFastSlowRateLimiter{ | ||
failures: map[interface{}]int{}, | ||
fastDelay: fastDelay, | ||
slowDelay: slowDelay, | ||
maxFastAttempts: maxFastAttempts, | ||
} | ||
} | ||
|
||
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
r.failures[item] = r.failures[item] + 1 | ||
|
||
if r.failures[item] <= r.maxFastAttempts { | ||
return r.fastDelay | ||
} | ||
|
||
return r.slowDelay | ||
} | ||
|
||
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
return r.failures[item] | ||
} | ||
|
||
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) { | ||
r.failuresLock.Lock() | ||
defer r.failuresLock.Unlock() | ||
|
||
delete(r.failures, item) | ||
} | ||
|
||
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Note that waiting extra wrt a particular limiter could cause it to apparently exceed its burst setting. |
||
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items | ||
// were separately delayed a longer time. | ||
type MaxOfRateLimiter struct { | ||
limiters []RateLimiter | ||
} | ||
|
||
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration { | ||
ret := time.Duration(0) | ||
for _, limiter := range r.limiters { | ||
curr := limiter.When(item) | ||
if curr > ret { | ||
ret = curr | ||
} | ||
} | ||
|
||
return ret | ||
} | ||
|
||
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter { | ||
return &MaxOfRateLimiter{limiters: limiters} | ||
} | ||
|
||
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int { | ||
ret := 0 | ||
for _, limiter := range r.limiters { | ||
curr := limiter.NumRequeues(item) | ||
if curr > ret { | ||
ret = curr | ||
} | ||
} | ||
|
||
return ret | ||
} | ||
|
||
func (r *MaxOfRateLimiter) Forget(item interface{}) { | ||
for _, limiter := range r.limiters { | ||
limiter.Forget(item) | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,151 @@ | ||
/* | ||
Copyright 2016 The Kubernetes Authors All rights reserved. | ||
|
||
Licensed under the Apache License, Version 2.0 (the "License"); | ||
you may not use this file except in compliance with the License. | ||
You may obtain a copy of the License at | ||
|
||
http://www.apache.org/licenses/LICENSE-2.0 | ||
|
||
Unless required by applicable law or agreed to in writing, software | ||
distributed under the License is distributed on an "AS IS" BASIS, | ||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
See the License for the specific language governing permissions and | ||
limitations under the License. | ||
*/ | ||
|
||
package workqueue | ||
|
||
import ( | ||
"testing" | ||
"time" | ||
) | ||
|
||
func TestItemExponentialFailureRateLimiter(t *testing.T) { | ||
limiter := NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second) | ||
|
||
if e, a := 1*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 100*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 1*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 1*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
if e, a := 1*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 2, limiter.NumRequeues("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
limiter.Forget("one") | ||
if e, a := 0, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 1*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
} | ||
|
||
func TestItemFastSlowRateLimiter(t *testing.T) { | ||
limiter := NewItemFastSlowRateLimiter(5*time.Millisecond, 10*time.Second, 3) | ||
|
||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 2, limiter.NumRequeues("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
limiter.Forget("one") | ||
if e, a := 0, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
} | ||
|
||
func TestMaxOfRateLimiter(t *testing.T) { | ||
limiter := NewMaxOfRateLimiter( | ||
NewItemFastSlowRateLimiter(5*time.Millisecond, 3*time.Second, 3), | ||
NewItemExponentialFailureRateLimiter(1*time.Millisecond, 1*time.Second), | ||
) | ||
|
||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 100*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 3*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 3*time.Second, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
if e, a := 5*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 10*time.Millisecond, limiter.When("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 2, limiter.NumRequeues("two"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
limiter.Forget("one") | ||
if e, a := 0, limiter.NumRequeues("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
if e, a := 5*time.Millisecond, limiter.When("one"); e != a { | ||
t.Errorf("expected %v, got %v", e, a) | ||
} | ||
|
||
} |
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We already have some rate limiting stuff in util/? Is it not reusable here? I'd prefer not to add a rate limiter to the workqueue package.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It's a slight different interface for the queuer to allow it automatically backoff per-item as well as per-queue. There is an adapter for handling the bucket rate limiter we use as a godep so that you can reliably rate limit both the controller and the items to prevent excessive burst utilization by a single item.