async.go
package publisher

import (
	"github.com/elastic/beats/libbeat/common/op"
	"github.com/elastic/beats/libbeat/logp"
)

// asyncPipeline fans published messages out to all configured output
// workers without waiting for the sends to complete.
type asyncPipeline struct {
	outputs []worker
	pub     *BeatPublisher
}

const (
	defaultBulkSize = 2048
)

// newAsyncPipeline creates the asynchronous publisher pipeline, building one
// (optionally batching) worker per configured output.
func newAsyncPipeline(
	pub *BeatPublisher,
	hwm, bulkHWM int,
	ws *workerSignal,
) *asyncPipeline {
	p := &asyncPipeline{pub: pub}

	var outputs []worker
	for _, out := range pub.Output {
		outputs = append(outputs, makeAsyncOutput(ws, hwm, bulkHWM, out))
	}

	p.outputs = outputs
	return p
}

// publish forwards the message to every output worker. If the message carries
// a completion signal, the signal is wrapped so it can be canceled by the
// client and, with multiple outputs, split so each output reports back
// independently.
func (p *asyncPipeline) publish(m message) bool {
	if p.pub.disabled {
		debug("publisher disabled")
		op.SigCompleted(m.context.Signal)
		return true
	}

	if m.context.Signal != nil {
		s := op.CancelableSignaler(m.client.canceler, m.context.Signal)
		if len(p.outputs) > 1 {
			s = op.SplitSignaler(s, len(p.outputs))
		}
		m.context.Signal = s
	}

	for _, o := range p.outputs {
		o.send(m)
	}
	return true
}

// makeAsyncOutput wraps an output worker in a bulk processing worker, unless
// batching has been disabled via the flush interval or bulk size settings.
func makeAsyncOutput(
	ws *workerSignal,
	hwm, bulkHWM int,
	worker *outputWorker,
) worker {
	config := worker.config

	flushInterval := config.FlushInterval
	maxBulkSize := config.BulkMaxSize
	logp.Info("Flush Interval set to: %v", flushInterval)
	logp.Info("Max Bulk Size set to: %v", maxBulkSize)

	// batching disabled
	if flushInterval <= 0 || maxBulkSize <= 0 {
		return worker
	}

	debug("create bulk processing worker (interval=%v, bulk size=%v)",
		flushInterval, maxBulkSize)
	return newBulkWorker(ws, hwm, bulkHWM, worker, flushInterval, maxBulkSize)
}
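
The gating decision in makeAsyncOutput — return the plain output worker when batching is disabled, otherwise wrap it in a bulk worker — can be illustrated with a minimal, self-contained sketch. The types below (sender, direct, batching, makeOutput) are hypothetical stand-ins for illustration only, not the libbeat types; the timer-driven flush that the real bulk worker performs every flushInterval is omitted for brevity.

// Sketch of the batching gate, with hypothetical stand-in types.
package main

import (
	"fmt"
	"time"
)

// sender is a stand-in for the package's worker interface.
type sender interface {
	send(event string)
}

// direct forwards each event immediately (stand-in for the plain output worker).
type direct struct{ name string }

func (d *direct) send(event string) { fmt.Printf("%s -> %s\n", d.name, event) }

// batching buffers events and forwards them in bulk (stand-in for the bulk worker).
// NOTE: the periodic flush on the flush interval is omitted here for brevity.
type batching struct {
	out   sender
	buf   []string
	max   int
	flush time.Duration
}

func (b *batching) send(event string) {
	b.buf = append(b.buf, event)
	if len(b.buf) >= b.max {
		for _, e := range b.buf {
			b.out.send(e)
		}
		b.buf = b.buf[:0]
	}
}

// makeOutput mirrors the decision in makeAsyncOutput: batching is only
// enabled when both the flush interval and the bulk size are positive.
func makeOutput(out sender, flushInterval time.Duration, maxBulkSize int) sender {
	if flushInterval <= 0 || maxBulkSize <= 0 {
		return out // batching disabled
	}
	return &batching{out: out, max: maxBulkSize, flush: flushInterval}
}

func main() {
	out := makeOutput(&direct{name: "es"}, time.Second, 2)
	out.send("event-1")
	out.send("event-2") // reaching the bulk size flushes both events
}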