forked from elastic/beats
-
Notifications
You must be signed in to change notification settings - Fork 0
/
ringbuf.go
293 lines (241 loc) · 7.08 KB
/
ringbuf.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
package memqueue
import (
"fmt"
"github.com/elastic/beats/libbeat/publisher"
)
// Internal event ring buffer.
// The ring is split into 2 regions.
// Region A contains active events to be sent to consumers, while region B can
// only be filled by producers if there is no more space in region A. Splitting
// the ring buffer into regions enables the broker to send batches of type
// []publisher.Event to the consumer without having to copy and/or grow/shrink the
// buffers.
type ringBuffer struct {
	buf eventBuffer // backing storage shared by both regions

	// regA is the active (consumer-facing) region; regB is the overflow
	// region producers write to once region A has reached the buffer end.
	regA, regB region

	reserved int // amount of events in region A actively processed/reserved
}
// region describes one contiguous run of occupied slots in the ring buffer.
type region struct {
	index int // start offset of the region within the backing buffer
	size  int // number of events currently held by the region
}
// eventBuffer is the backing storage of the ring buffer. Events and their
// per-event producer state are kept in two parallel slices of equal length.
type eventBuffer struct {
	logger  logger
	events  []publisher.Event
	clients []clientState // clients[i] is the producer state for events[i]
}
// clientState links a buffered event back to the producer that enqueued it.
type clientState struct {
	seq   uint32        // event sequence number
	state *produceState // the producer's state, used to compute and signal the ACK count
}
// init allocates storage for up to size events and their producer states.
func (b *eventBuffer) init(size int) {
	b.clients = make([]clientState, size)
	b.events = make([]publisher.Event, size)
}
// Len returns the total capacity of the buffer in events.
func (b *eventBuffer) Len() int {
	return len(b.events)
}
// Set stores event and its producer state st in slot idx, overwriting any
// previous content of that slot.
func (b *eventBuffer) Set(idx int, event publisher.Event, st clientState) {
	b.clients[idx] = st
	b.events[idx] = event
}
// newRingBuffer creates an initialized ring buffer with capacity for
// size events.
func newRingBuffer(log logger, size int) *ringBuffer {
	rb := &ringBuffer{}
	rb.init(log, size)
	return rb
}
// init resets all ring buffer state and allocates backing storage for
// size events.
func (b *ringBuffer) init(log logger, size int) {
	*b = ringBuffer{}
	b.buf.logger = log
	b.buf.init(size)
}
// insert adds an event with its producer state to the ring buffer.
// It returns false if the buffer is full. On success it returns true plus
// the number of slots still free in the region the event was written to.
func (b *ringBuffer) insert(event publisher.Event, client clientState) (bool, int) {
	// always insert into region B, if region B exists.
	// That is, we have 2 regions and region A is currently processed by consumers
	if b.regB.size > 0 {
		// region B grows towards region A's start index
		idx := b.regB.index + b.regB.size
		avail := b.regA.index - idx
		if avail == 0 {
			return false, 0
		}

		b.buf.Set(idx, event, client)
		b.regB.size++
		return true, avail - 1
	}

	// region B does not exist yet, check if region A is available for use
	idx := b.regA.index + b.regA.size
	avail := b.buf.Len() - idx
	if avail == 0 { // no more space in region A
		if b.regA.index == 0 {
			// no space left to create region B -> the buffer is full
			return false, 0
		}

		// create region B (starting at slot 0) and insert the event there
		b.regB.index = 0
		b.regB.size = 1
		b.buf.Set(0, event, client)
		return true, b.regA.index - 1
	}

	// space available in region A -> let's append the event
	b.buf.Set(idx, event, client)
	b.regA.size++
	return true, avail - 1
}
// cancel removes all buffered events matching `st`, not yet reserved by
// any consumer. It returns the number of events removed.
func (b *ringBuffer) cancel(st *produceState) int {
	// TODO: return early if st has no pending events

	// all of region B is unreserved, so filter the whole region
	cancelB := b.cancelRegion(st, b.regB)
	b.regB.size -= cancelB

	// only the unreserved tail of region A may be filtered; the first
	// `reserved` events have already been handed out to consumers
	cancelA := b.cancelRegion(st, region{
		index: b.regA.index + b.reserved,
		size:  b.regA.size - b.reserved,
	})
	b.regA.size -= cancelA

	return cancelA + cancelB
}
// cancelRegion removes all events in reg belonging to producer state st,
// compacting the surviving events in place towards the region start. The
// freed tail slots are zeroed so the dropped events can be collected by
// the garbage collector. It returns the number of events removed.
func (b *ringBuffer) cancelRegion(st *produceState, reg region) (removed int) {
	start := reg.index
	end := start + reg.size

	// the to* slices alias events/clients at offset 0; reads in the filter
	// loop always stay at or ahead of the append position, so the in-place
	// compaction is safe
	events := b.buf.events[start:end]
	clients := b.buf.clients[start:end]
	toEvents := events[:0]
	toClients := clients[:0]

	// filter loop: keep only events not produced by st
	for i := 0; i < reg.size; i++ {
		if clients[i].state == st {
			continue // remove
		}
		toEvents = append(toEvents, events[i])
		toClients = append(toClients, clients[i])
	}

	// re-initialize old buffer elements to help garbage collector
	events = events[len(toEvents):]
	clients = clients[len(toClients):]
	for i := range events {
		events[i] = publisher.Event{}
		clients[i] = clientState{}
	}

	// len(events) is now reg.size minus the number of kept events
	return len(events)
}
// activeBufferOffsets returns the start and end offsets of all events
// currently held in region A.
func (b *ringBuffer) activeBufferOffsets() (int, int) {
	start := b.regA.index
	end := start + b.regA.size
	return start, end
}
// reserve returns up to `sz` events from the brokerBuffer,
// exclusively marking the events as 'reserved'. Subsequent calls to `reserve`
// will only return enqueued and non-reserved events from the buffer.
// If `sz == -1` (or any non-positive value), all available events will be
// reserved. It returns the start offset of the reserved range together with
// the reserved events themselves.
func (b *ringBuffer) reserve(sz int) (int, []publisher.Event) {
	// cap the request at the number of unreserved events in region A
	avail := b.regA.size - b.reserved
	if sz > 0 && avail > sz {
		avail = sz
	}

	start := b.regA.index + b.reserved
	end := start + avail
	b.reserved += avail
	return start, b.buf.events[start:end]
}
// ack marks the first sz events of region A as processed and removes them
// from the buffer. Once region A runs empty, region B (if present) is
// promoted to become the new region A. ack panics if sz exceeds region A's
// size, as that indicates broken bookkeeping in the broker.
func (b *ringBuffer) ack(sz int) {
	if b.regA.size < sz {
		// fix: error string was "Commit region to big" — typo ("to" -> "too")
		// and capitalized, violating Go error-string conventions
		panic(fmt.Errorf("commit region too big (commit region=%v, buffer size=%v)",
			sz, b.regA.size,
		))
	}

	// clear region, so published events can be collected by the garbage collector:
	end := b.regA.index + sz
	for i := b.regA.index; i < end; i++ {
		b.buf.events[i] = publisher.Event{}
	}

	b.regA.index = end
	b.regA.size -= sz
	b.reserved -= sz
	if b.regA.size == 0 {
		// region A is empty, transfer region B into region A
		b.regA = b.regB
		b.regB.index = 0
		b.regB.size = 0
	}
}
// Empty reports whether region A holds no more unreserved events.
func (b *ringBuffer) Empty() bool {
	return b.regA.size == b.reserved
}
// Avail returns the number of events in region A not yet reserved by
// any consumer.
func (b *ringBuffer) Avail() int {
	return b.regA.size - b.reserved
}
// RegionBActive reports whether the overflow region B currently holds
// any events.
func (b *ringBuffer) RegionBActive() bool {
	return b.regB.size != 0
}
// RegionSizes returns the current number of events stored in region A
// and region B respectively.
func (b *ringBuffer) RegionSizes() (int, int) {
	return b.regA.size, b.regB.size
}
// TotalAvail returns the total number of unreserved events across both
// regions.
func (b *ringBuffer) TotalAvail() int {
	total := b.regA.size + b.regB.size
	return total - b.reserved
}
// Full reports whether no further event can be inserted. With region B
// active, the buffer is full once region B has grown up to region A's
// start; otherwise it is full once region A has reached the end of the
// backing storage.
func (b *ringBuffer) Full() bool {
	if b.regB.size > 0 {
		return b.regB.index+b.regB.size == b.regA.index
	}
	return b.regA.index+b.regA.size == b.buf.Len()
}
// Size returns the total capacity of the ring buffer in events.
func (b *ringBuffer) Size() int {
	return b.buf.Len()
}