-
Notifications
You must be signed in to change notification settings - Fork 153
/
dispatcher.go
170 lines (149 loc) · 4.1 KB
/
dispatcher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
package execute
import (
"context"
"sync"
"go.uber.org/zap"
)
// Dispatcher schedules work for a query.
// Each transformation submits work to be done to the dispatcher,
// which then schedules that work based on the available resources.
type Dispatcher interface {
// Schedule queues fn to be executed by one of the dispatcher's workers.
Schedule(fn ScheduleFunc)
}
// ScheduleFunc is a function that represents work to do.
// The throughput is the maximum number of messages to process for this scheduling.
// Implementations should honor cancellation of ctx where practical.
type ScheduleFunc func(ctx context.Context, throughput int)
// poolDispatcher implements Dispatcher using a pool of goroutines.
type poolDispatcher struct {
// work is the queue of pending ScheduleFuncs. Guarded by workMu.
work *ring
// ready carries a binary "work is available" signal; it has a
// buffer of one so Schedule never blocks when signaling.
ready chan struct{}
// workMu guards access to work.
workMu sync.Mutex
// throughput is passed to every ScheduleFunc invocation.
throughput int
// mu guards closed, err, and the shutdown sequence.
mu sync.Mutex
// closed records whether Stop has been invoked.
closed bool
// closing is closed to tell workers to stop processing.
closing chan struct{}
// wg tracks the worker goroutines started by Start.
wg sync.WaitGroup
// err is the first error encountered by a worker, if any.
err error
// errC publishes the first error; buffered so setErr never blocks.
errC chan error
logger *zap.Logger
}
// newPoolDispatcher constructs a poolDispatcher whose workers hand the
// given throughput to every scheduled function and log through logger.
func newPoolDispatcher(throughput int, logger *zap.Logger) *poolDispatcher {
d := &poolDispatcher{
throughput: throughput,
work: newRing(100),
// Buffer of one: "work available" is a binary flag, so a single
// pending signal is sufficient for the run loop.
ready: make(chan struct{}, 1),
closing: make(chan struct{}),
// Buffered so the first error can be published without blocking.
errC: make(chan error, 1),
logger: logger.With(zap.String("component", "dispatcher")),
}
return d
}
// Schedule enqueues fn and wakes an idle worker, if any.
func (d *poolDispatcher) Schedule(fn ScheduleFunc) {
d.workMu.Lock()
defer d.workMu.Unlock()

// Enqueue first, then signal availability so a waking worker
// is guaranteed to observe the new item.
d.work.Append(fn)

// ready acts as a binary "work exists" flag with a buffer of one:
// a single pending signal is enough for the outer run loop, so a
// send is dropped when a signal is already queued.
select {
case d.ready <- struct{}{}:
default:
}
}
// Start launches n worker goroutines that process scheduled work until
// ctx is canceled or the dispatcher is stopped.
// NOTE(review): by Go convention ctx would be the first parameter;
// reordering would break existing callers, so the signature is kept.
func (d *poolDispatcher) Start(n int, ctx context.Context) {
d.wg.Add(n)
for i := 0; i < n; i++ {
go d.worker(ctx)
}
}

// worker is the body of a single pool goroutine.
func (d *poolDispatcher) worker(ctx context.Context) {
defer d.wg.Done()
// Convert panics on this worker into dispatcher errors.
defer d.recover()
d.run(ctx)
}
// Err returns a channel which will produce an error if one is encountered.
// The channel is buffered with capacity one; only the first error is sent.
func (d *poolDispatcher) Err() <-chan error {
d.mu.Lock()
defer d.mu.Unlock()
return d.errC
}
// setErr records the first error encountered and publishes it on errC.
// Errors after the first are dropped.
func (d *poolDispatcher) setErr(err error) {
d.mu.Lock()
defer d.mu.Unlock()
// TODO(nathanielc): Collect all error information.
if d.err != nil {
// An error was already recorded; keep the first one.
return
}
d.err = err
// errC is buffered with capacity one, so this send cannot block.
d.errC <- err
}
// Stop shuts the dispatcher down, waits for in-flight workers to exit,
// and returns the first error recorded by any worker (or nil).
// It is safe to call Stop more than once.
func (d *poolDispatcher) Stop() error {
d.signalClose()

// Block until every worker goroutine has returned.
d.wg.Wait()

// Read the recorded error from within the lock.
d.mu.Lock()
defer d.mu.Unlock()
return d.err
}

// signalClose marks the dispatcher closed and notifies workers via the
// closing channel, doing so at most once across all Stop calls.
func (d *poolDispatcher) signalClose() {
d.mu.Lock()
defer d.mu.Unlock()
if d.closed {
return
}
d.closed = true
close(d.closing)
}
// run is the top-level loop for one worker goroutine. It sleeps until
// work is signaled, then drains the queue, and exits when the context
// is canceled or the dispatcher is closed.
func (d *poolDispatcher) run(ctx context.Context) {
done := ctx.Done()
for {
select {
case <-done:
// Context canceled: return immediately, do not process more work.
return
case <-d.closing:
// Dispatcher stopped: nothing left to do.
return
case <-d.ready:
// At least one item is queued. Pull work until the queue is
// empty or one of the exit conditions above fires.
d.doWork(ctx)
}
}
}
// doWork keeps pulling scheduled functions off the queue and running
// them until the context is canceled, the dispatcher is closed, or the
// queue is empty.
func (d *poolDispatcher) doWork(ctx context.Context) {
for {
fn := d.nextWork()
if fn == nil {
// Queue drained; return to the run loop, which waits for the
// next ready signal.
return
}

fn(ctx, d.throughput)

// Allow an early exit even if work remains queued.
select {
case <-ctx.Done():
return
case <-d.closing:
return
default:
}
}
}

// nextWork pops the next scheduled function from the queue under the
// work lock, returning nil when the queue is empty.
func (d *poolDispatcher) nextWork() ScheduleFunc {
d.workMu.Lock()
defer d.workMu.Unlock()
if next := d.work.Next(); next != nil {
return next.(ScheduleFunc)
}
return nil
}