Permalink
Switch branches/tags
v9.0.0 v9.0.0-invalid v8.0.0 v7.0.0 v6.0.0 v5.0.1 v5.0.0 v4.0.0 v4.0.0-beta.0 v3.0.0 v3.0.0-beta.0 v2.0.0 v2.0.0-alpha.1 v2.0.0-alpha.0 v1.5.1 v1.5.0 v1.4.0 kubernetes-1.14.0-alpha.0 kubernetes-1.13.0-beta.0 kubernetes-1.13.0-alpha.3 kubernetes-1.13.0-alpha.2 kubernetes-1.13.0-alpha.1 kubernetes-1.13.0-alpha.0 kubernetes-1.12.3-beta.0 kubernetes-1.12.2 kubernetes-1.12.2-beta.0 kubernetes-1.12.1 kubernetes-1.12.1-beta.0 kubernetes-1.12.1-beta.0-invalid kubernetes-1.12.0 kubernetes-1.12.0-rc.2 kubernetes-1.12.0-rc.2-invalid kubernetes-1.12.0-rc.1 kubernetes-1.12.0-rc.1-invalid kubernetes-1.12.0-invalid kubernetes-1.12.0-beta.2 kubernetes-1.12.0-beta.2-invalid kubernetes-1.12.0-beta.1 kubernetes-1.12.0-beta.0 kubernetes-1.12.0-alpha.1 kubernetes-1.12.0-alpha.0 kubernetes-1.11.5-beta.0 kubernetes-1.11.4 kubernetes-1.11.4-beta.0 kubernetes-1.11.3 kubernetes-1.11.3-beta.0 kubernetes-1.11.2 kubernetes-1.11.2-beta.0 kubernetes-1.11.1 kubernetes-1.11.1-beta.0 kubernetes-1.11.0 kubernetes-1.11.0-rc.3 kubernetes-1.11.0-rc.2 kubernetes-1.11.0-rc.1 kubernetes-1.11.0-beta.2 kubernetes-1.11.0-beta.1 kubernetes-1.11.0-beta.0 kubernetes-1.11.0-alpha.2 kubernetes-1.11.0-alpha.1 kubernetes-1.11.0-alpha.0 kubernetes-1.10.10-beta.0 kubernetes-1.10.9 kubernetes-1.10.9-beta.0 kubernetes-1.10.8 kubernetes-1.10.8-beta.0 kubernetes-1.10.7 kubernetes-1.10.7-beta.0 kubernetes-1.10.6 kubernetes-1.10.6-beta.0 kubernetes-1.10.5 kubernetes-1.10.5-beta.0 kubernetes-1.10.4 kubernetes-1.10.4-beta.0 kubernetes-1.10.3 kubernetes-1.10.3-beta.0 kubernetes-1.10.2 kubernetes-1.10.2-beta.0 kubernetes-1.10.1 kubernetes-1.10.1-beta.0 kubernetes-1.10.0 kubernetes-1.10.0-rc.1 kubernetes-1.10.0-beta.4 kubernetes-1.10.0-beta.3 kubernetes-1.10.0-beta.2 kubernetes-1.10.0-beta.1 kubernetes-1.10.0-beta.0 kubernetes-1.10.0-alpha.3 kubernetes-1.10.0-alpha.2 kubernetes-1.10.0-alpha.1 kubernetes-1.10.0-alpha.0 kubernetes-1.9.12-beta.0 kubernetes-1.9.11 kubernetes-1.9.11-beta.0 kubernetes-1.9.10 kubernetes-1.9.10-beta.0 
kubernetes-1.9.9 kubernetes-1.9.9-beta.0 kubernetes-1.9.8 kubernetes-1.9.8-beta.0 kubernetes-1.9.7
Nothing to show
Find file Copy path
Fetching contributors…
Cannot retrieve contributors at this time
213 lines (172 sloc) 4.73 KB
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
// Interface is the contract of a work queue (see the package comment).
// *Type is the concrete implementation in this file.
type Interface interface {
	// Add marks item as needing processing.
	Add(item interface{})
	// Len returns the number of items currently waiting in the queue.
	Len() int
	// Get blocks until an item can be processed; shutdown is true when the
	// queue is shutting down and the caller should exit.
	Get() (item interface{}, shutdown bool)
	// Done marks item as finished processing.
	Done(item interface{})
	// ShutDown causes the queue to ignore all new items.
	ShutDown()
	// ShuttingDown reports whether ShutDown has been called.
	ShuttingDown() bool
}
// New constructs a new work queue (see the package comment).
// It is equivalent to NewNamed(""), i.e. the queue's metrics are created
// with an empty name.
func New() *Type {
	return NewNamed("")
}
// NewNamed constructs a work queue whose metrics are created under the
// given name, backed by the real wall clock.
func NewNamed(name string) *Type {
	realClock := clock.RealClock{}
	metrics := globalMetricsFactory.newQueueMetrics(name, realClock)
	return newQueue(realClock, metrics, defaultUnfinishedWorkUpdatePeriod)
}
// newQueue assembles a *Type from its dependencies and starts the
// background goroutine that periodically updates unfinished-work metrics.
func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
	q := &Type{
		clock:                      c,
		metrics:                    metrics,
		dirty:                      set{},
		processing:                 set{},
		cond:                       sync.NewCond(&sync.Mutex{}),
		unfinishedWorkUpdatePeriod: updatePeriod,
	}
	// The loop terminates on its own once the queue is shut down.
	go q.updateUnfinishedWorkLoop()
	return q
}
// defaultUnfinishedWorkUpdatePeriod is how often updateUnfinishedWorkLoop
// reports unfinished work to the queue's metrics.
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
type Type struct {
	// queue defines the order in which we will work on items. Every
	// element of queue should be in the dirty set and not in the
	// processing set.
	queue []t
	// dirty defines all of the items that need to be processed.
	dirty set
	// Things that are currently being processed are in the processing set.
	// These things may be simultaneously in the dirty set. When we finish
	// processing something and remove it from this set, we'll check if
	// it's in the dirty set, and if so, add it to the queue.
	processing set
	// cond guards every field above and wakes goroutines blocked in Get.
	cond *sync.Cond
	// shuttingDown is set by ShutDown; once true, Add ignores new items.
	shuttingDown bool
	// metrics receives add/get/done and unfinished-work notifications.
	metrics queueMetrics
	// unfinishedWorkUpdatePeriod is the interval between metrics updates
	// performed by updateUnfinishedWorkLoop.
	unfinishedWorkUpdatePeriod time.Duration
	// clock supplies the ticker used by updateUnfinishedWorkLoop; it is
	// injected via newQueue (a fake clock can be passed in for tests).
	clock clock.Clock
}
// empty is a zero-size placeholder used as the value type of set.
type empty struct{}

// t is the type of item the queue holds.
type t interface{}

// set is an unordered collection of unique items.
type set map[t]empty

// has reports whether item is a member of the set.
func (s set) has(item t) bool {
	_, found := s[item]
	return found
}

// insert adds item to the set; inserting an existing member is a no-op.
func (s set) insert(item t) {
	s[item] = empty{}
}

// delete removes item from the set; removing a non-member is a no-op.
func (s set) delete(item t) {
	delete(s, item)
}
// Add marks item as needing processing. Items offered while the queue is
// shutting down, or that are already pending in the dirty set, are dropped.
func (q *Type) Add(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	// Refuse new work after ShutDown, and deduplicate pending items.
	// Short-circuit keeps the original check order.
	if q.shuttingDown || q.dirty.has(item) {
		return
	}
	q.metrics.add(item)
	q.dirty.insert(item)
	// An item currently in flight stays out of the queue; Done will
	// requeue it because it now sits in the dirty set.
	if q.processing.has(item) {
		return
	}
	q.queue = append(q.queue, item)
	q.cond.Signal()
}
// Len reports how many items are waiting to be handed out by Get. It is
// informational only: the length may change before the caller acts on it,
// so it must not be used to gate calls to Add or Get.
func (q *Type) Len() int {
	q.cond.L.Lock()
	n := len(q.queue)
	q.cond.L.Unlock()
	return n
}
// Get blocks until an item is available to process or the queue has been
// drained during shutdown. When shutdown is true the caller should end its
// goroutine. Callers must invoke Done(item) once processing finishes.
func (q *Type) Get() (item interface{}, shutdown bool) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	for len(q.queue) == 0 {
		if q.shuttingDown {
			// Queue is empty and shutting down: tell the caller to stop.
			return nil, true
		}
		q.cond.Wait()
	}
	// Pop the head of the queue and move it from dirty to processing.
	item = q.queue[0]
	q.queue = q.queue[1:]
	q.metrics.get(item)
	q.processing.insert(item)
	q.dirty.delete(item)
	return item, false
}
// Done marks item as no longer being processed. If the item was Added again
// while it was in flight (i.e. it is in the dirty set), it is pushed back
// onto the queue so it will be processed once more.
func (q *Type) Done(item interface{}) {
	q.cond.L.Lock()
	defer q.cond.L.Unlock()
	q.metrics.done(item)
	q.processing.delete(item)
	if !q.dirty.has(item) {
		return
	}
	// Re-Added during processing: requeue and wake one waiter in Get.
	q.queue = append(q.queue, item)
	q.cond.Signal()
}
// ShutDown makes q ignore every item added from now on. Workers continue to
// drain already-queued items via Get; once the queue is empty, Get reports
// shutdown so the workers can exit.
func (q *Type) ShutDown() {
	q.cond.L.Lock()
	q.shuttingDown = true
	// Wake every goroutine blocked in Get so each can observe the flag.
	q.cond.Broadcast()
	q.cond.L.Unlock()
}
// ShuttingDown reports whether ShutDown has been called on q.
func (q *Type) ShuttingDown() bool {
	q.cond.L.Lock()
	down := q.shuttingDown
	q.cond.L.Unlock()
	return down
}
// updateUnfinishedWorkLoop periodically pushes an unfinished-work update to
// the queue's metrics, and exits once the queue is shutting down.
func (q *Type) updateUnfinishedWorkLoop() {
	ticker := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
	defer ticker.Stop()
	for range ticker.C() {
		// Check the flag and update metrics under the queue lock.
		done := func() bool {
			q.cond.L.Lock()
			defer q.cond.L.Unlock()
			if q.shuttingDown {
				return true
			}
			q.metrics.updateUnfinishedWork()
			return false
		}()
		if done {
			return
		}
	}
}