-
Notifications
You must be signed in to change notification settings - Fork 16
/
streaming_data_source.go
380 lines (337 loc) · 13.6 KB
/
streaming_data_source.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
package datasource
import (
"net/http"
"sync"
"time"
"gopkg.in/launchdarkly/go-sdk-common.v2/ldlog"
"gopkg.in/launchdarkly/go-sdk-common.v2/ldtime"
ldevents "gopkg.in/launchdarkly/go-sdk-events.v1"
"gopkg.in/launchdarkly/go-server-sdk.v5/interfaces"
"gopkg.in/launchdarkly/go-server-sdk.v5/interfaces/ldstoretypes"
es "github.com/launchdarkly/eventsource"
)
// Implementation of the streaming data source, not including the lower-level SSE implementation which is in
// the eventsource package.
//
// Error handling works as follows:
// 1. If any event is malformed, we must assume the stream is broken and we may have missed updates. Set the
// data source state to INTERRUPTED, with an error kind of INVALID_DATA, and restart the stream.
// 2. If we try to put updates into the data store and we get an error, we must assume something's wrong with the
// data store. We don't have to log this error because it is logged by DataSourceUpdatesImpl, which will also set
// our state to INTERRUPTED for us.
// 2a. If the data store supports status notifications (which all persistent stores normally do), then we can
// assume it has entered a failed state and will notify us once it is working again. If and when it recovers, then
// it will tell us whether we need to restart the stream (to ensure that we haven't missed any updates), or
// whether it has already persisted all of the stream updates we received during the outage.
// 2b. If the data store doesn't support status notifications (which is normally only true of the in-memory store)
// then we don't know the significance of the error, but we must assume that updates have been lost, so we'll
// restart the stream.
// 3. If we receive an unrecoverable error like HTTP 401, we close the stream and don't retry, and set the state
// to OFF. Any other HTTP error or network error causes a retry with backoff, with a state of INTERRUPTED.
// 4. We set the Future returned by start() to tell the client initialization logic that initialization has either
// succeeded (we got an initial payload and successfully stored it) or permanently failed (we got a 401, etc.).
// Otherwise, the client initialization method may time out but we will still be retrying in the background, and
// if we succeed then the client can detect that we're initialized now by calling our Initialized method.
const (
	// Event type names used by the LaunchDarkly streaming protocol.
	putEvent    = "put"    // full data payload; replaces all stored data
	patchEvent  = "patch"  // upsert of a single item
	deleteEvent = "delete" // deletion of a single item (represented as a versioned tombstone)

	// Connection/retry tuning for the SSE stream.
	streamReadTimeout        = 5 * time.Minute // the LaunchDarkly stream should send a heartbeat comment every 3 minutes
	streamMaxRetryDelay      = 30 * time.Second // upper bound for the reconnection backoff
	streamRetryResetInterval = 60 * time.Second // a connection lasting this long resets the backoff to the initial delay
	streamJitterRatio        = 0.5              // fraction of the computed delay that is randomized
	defaultStreamRetryDelay  = 1 * time.Second  // used if no (or a non-positive) initial reconnect delay was configured

	// Fixed strings used when logging stream errors.
	streamingErrorContext     = "in stream connection"
	streamingWillRetryMessage = "will retry"
)
// StreamProcessor is the internal implementation of the streaming data source.
//
// This type is exported from internal so that the StreamingDataSourceBuilder tests can verify its
// configuration. All other code outside of this package should interact with it only via the
// DataSource interface.
type StreamProcessor struct {
	dataSourceUpdates     interfaces.DataSourceUpdates  // sink for received data and for data source status updates
	streamURI             string                        // base stream URI; "/all" is appended when connecting
	initialReconnectDelay time.Duration                 // configured initial retry delay; <= 0 falls back to defaultStreamRetryDelay
	client                *http.Client                  // HTTP client from the SDK config, with Timeout forced to zero (see NewStreamProcessor)
	headers               http.Header                   // default headers (authorization etc.) applied to the stream request
	diagnosticsManager    *ldevents.DiagnosticsManager  // optional; records stream init success/failure timings
	loggers               ldlog.Loggers
	setInitializedOnce    sync.Once // guards the one-time transition of isInitialized to true
	// isInitialized is set (once) by setInitializedAndNotifyClient and read by IsInitialized.
	// NOTE(review): the read in IsInitialized is not synchronized with this write — looks like
	// a data race under -race; consider an atomic flag. Confirm before changing.
	isInitialized bool
	halt          chan struct{}                        // closed by Close() to terminate consumeStream
	storeStatusCh <-chan interfaces.DataStoreStatus    // non-nil only if the data store supports status monitoring (scenario 2a)
	connectionAttemptStartTime ldtime.UnixMillisecondTime // when the current connection attempt began; 0 if none in progress
	connectionAttemptLock      sync.Mutex                 // guards connectionAttemptStartTime
	readyOnce                  sync.Once                  // guards the one-time close of the closeWhenReady channel
	closeOnce                  sync.Once                  // makes Close idempotent
}
// NewStreamProcessor creates the internal implementation of the streaming data source.
//
// The returned processor is not yet connected; call Start to begin streaming. The HTTP
// client, default headers, and loggers are all taken from the supplied client context.
func NewStreamProcessor(
	context interfaces.ClientContext,
	dataSourceUpdates interfaces.DataSourceUpdates,
	streamURI string,
	initialReconnectDelay time.Duration,
) *StreamProcessor {
	processor := &StreamProcessor{
		dataSourceUpdates:     dataSourceUpdates,
		streamURI:             streamURI,
		initialReconnectDelay: initialReconnectDelay,
		headers:               context.GetHTTP().GetDefaultHeaders(),
		loggers:               context.GetLogging().GetLoggers(),
		halt:                  make(chan struct{}),
	}
	// The diagnostics manager is only available if the context implementation provides it.
	if withDiagnostics, ok := context.(hasDiagnosticsManager); ok {
		processor.diagnosticsManager = withDiagnostics.GetDiagnosticsManager()
	}

	httpClient := context.GetHTTP().CreateHTTPClient()
	// Client.Timeout isn't just a connect timeout, it will break the connection if a full response
	// isn't received within that time (which, with the stream, it never will be), so we must make
	// sure it's zero and not the usual configured default. What we do want is a *connection* timeout,
	// which is set by Config.newHTTPClient as a property of the Dialer.
	httpClient.Timeout = 0
	processor.client = httpClient

	return processor
}
//nolint:golint,stylecheck // no doc comment for standard method
func (sp *StreamProcessor) IsInitialized() bool {
	// NOTE(review): sp.isInitialized is written on the stream goroutine (inside a sync.Once in
	// setInitializedAndNotifyClient) but read here with no synchronization — this looks like a
	// data race under the race detector; confirm, and consider using an atomic boolean.
	return sp.isInitialized
}
//nolint:golint,stylecheck // no doc comment for standard method
func (sp *StreamProcessor) Start(closeWhenReady chan<- struct{}) {
	sp.loggers.Info("Starting LaunchDarkly streaming connection")

	// If the data store can report its own status (persistent stores normally can), subscribe
	// to those updates so consumeStream can react to store outages and recoveries.
	statusProvider := sp.dataSourceUpdates.GetDataStoreStatusProvider()
	if statusProvider.IsStatusMonitoringEnabled() {
		sp.storeStatusCh = statusProvider.AddStatusListener()
	}

	go sp.subscribe(closeWhenReady)
}
// consumeStream is the main event loop for an established stream. It processes incoming SSE
// events ("put", "patch", "delete"), reacts to data store status changes, and terminates when
// sp.halt is closed. Error-handling scenarios referenced below are described in the comment
// block at the top of this file.
func (sp *StreamProcessor) consumeStream(stream *es.Stream, closeWhenReady chan<- struct{}) {
	// Consume remaining Events and Errors so we can garbage collect
	defer func() {
		for range stream.Events {
		} // COVERAGE: no way to cause this condition in unit tests
		if stream.Errors != nil {
			for range stream.Errors { // COVERAGE: no way to cause this condition in unit tests
			}
		}
	}()
	for {
		select {
		case event, ok := <-stream.Events:
			if !ok {
				// COVERAGE: stream.Events is only closed if the EventSource has been closed. However, that
				// only happens when we have received from sp.halt, in which case we return immediately
				// after calling stream.Close(), terminating the for loop-- so we should not actually reach
				// this point. Still, in case the channel is somehow closed unexpectedly, we do want to
				// terminate the loop.
				return
			}
			// Receiving any event means the connection attempt succeeded; record that for diagnostics.
			sp.logConnectionResult(true)
			// These flags are mutated by the two closures below as well as by the switch cases.
			processedEvent := true
			shouldRestart := false
			// gotMalformedEvent handles unparseable event data: report INTERRUPTED/INVALID_DATA and
			// flag the stream for restart (scenario 1).
			gotMalformedEvent := func(event es.Event, err error) {
				sp.loggers.Errorf(
					"Received streaming \"%s\" event with malformed JSON data (%s); will restart stream",
					event.Event(),
					err,
				)
				errorInfo := interfaces.DataSourceErrorInfo{
					Kind:    interfaces.DataSourceErrorKindInvalidData,
					Message: err.Error(),
					Time:    time.Now(),
				}
				sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo)
				shouldRestart = true // scenario 1 in error handling comments at top of file
				processedEvent = false
			}
			// storeUpdateFailed handles a failed write to the data store. The status-update/logging of
			// the underlying error is done by DataSourceUpdatesImpl, not here.
			storeUpdateFailed := func(updateDesc string) {
				if sp.storeStatusCh != nil {
					sp.loggers.Errorf("Failed to store %s in data store; will try again once data store is working", updateDesc)
					// scenario 2a in error handling comments at top of file
				} else {
					sp.loggers.Errorf("Failed to store %s in data store; will restart stream until successful", updateDesc)
					shouldRestart = true // scenario 2b
					processedEvent = false
				}
			}
			switch event.Event() {
			case putEvent:
				// Full data payload: replaces everything in the store, and (on success) marks the
				// data source as initialized.
				put, err := parsePutData([]byte(event.Data()))
				if err != nil {
					gotMalformedEvent(event, err)
					break
				}
				if sp.dataSourceUpdates.Init(put.Data) {
					sp.setInitializedAndNotifyClient(true, closeWhenReady)
				} else {
					storeUpdateFailed("initial streaming data")
				}
			case patchEvent:
				put, err := parsePatchData([]byte(event.Data()))
				// (local is named "patch" below; single-item upsert)
				patch := put
				_ = patch
				if err != nil {
					gotMalformedEvent(event, err)
					break
				}
				if patch.Kind == nil {
					break // ignore unrecognized item type
				}
				if !sp.dataSourceUpdates.Upsert(patch.Kind, patch.Key, patch.Data) {
					storeUpdateFailed("streaming update of " + patch.Key)
				}
			case deleteEvent:
				del, err := parseDeleteData([]byte(event.Data()))
				if err != nil {
					gotMalformedEvent(event, err)
					break
				}
				if del.Kind == nil {
					break // ignore unrecognized item type
				}
				// Deletions are stored as versioned tombstones (Item: nil) rather than removed outright.
				deletedItem := ldstoretypes.ItemDescriptor{Version: del.Version, Item: nil}
				if !sp.dataSourceUpdates.Upsert(del.Kind, del.Key, deletedItem) {
					storeUpdateFailed("streaming deletion of " + del.Key)
				}
			default:
				sp.loggers.Infof("Unexpected event found in stream: %s", event.Event())
			}
			if processedEvent {
				sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateValid, interfaces.DataSourceErrorInfo{})
			}
			if shouldRestart {
				stream.Restart()
			}
		case newStoreStatus := <-sp.storeStatusCh:
			if sp.loggers.IsDebugEnabled() {
				sp.loggers.Debugf("StreamProcessor received store status update: %+v", newStoreStatus)
			}
			if newStoreStatus.Available {
				// The store has just transitioned from unavailable to available (scenario 2a above)
				if newStoreStatus.NeedsRefresh {
					// The store is telling us that it can't guarantee that all of the latest data was cached.
					// So we'll restart the stream to ensure a full refresh.
					sp.loggers.Warn("Restarting stream to refresh data after data store outage")
					stream.Restart()
				}
				// All of the updates were cached and have been written to the store, so we don't need to
				// restart the stream. We just need to make sure the client knows we're initialized now
				// (in case the initial "put" was not stored).
				// NOTE(review): this call also runs on the NeedsRefresh path above (there is no else);
				// the preceding comment suggests it may have been intended only for the no-refresh
				// case — confirm intended behavior.
				sp.setInitializedAndNotifyClient(true, closeWhenReady)
			}
		case <-sp.halt:
			// Close() was called; shut down the stream and exit the loop.
			stream.Close()
			return
		}
	}
}
// subscribe opens the SSE connection to the LaunchDarkly streaming endpoint and, on success,
// hands the stream to consumeStream. Retries with backoff are handled by the eventsource
// package; the errorHandler closure decides whether a given error is recoverable (retry,
// state INTERRUPTED) or fatal (close, state OFF) — scenario 3 at the top of this file.
func (sp *StreamProcessor) subscribe(closeWhenReady chan<- struct{}) {
	// The error from NewRequest is deliberately ignored; it could only occur for a malformed
	// URI, in which case the subscribe call below will fail and be reported there.
	req, _ := http.NewRequest("GET", sp.streamURI+"/all", nil)
	// Apply the SDK's default headers (authorization, user agent, etc.).
	for k, vv := range sp.headers {
		req.Header[k] = vv
	}
	sp.loggers.Info("Connecting to LaunchDarkly stream")
	sp.logConnectionStarted()
	initialRetryDelay := sp.initialReconnectDelay
	if initialRetryDelay <= 0 { // COVERAGE: can't cause this condition in unit tests
		initialRetryDelay = defaultStreamRetryDelay
	}
	errorHandler := func(err error) es.StreamErrorHandlerResult {
		sp.logConnectionResult(false)
		if se, ok := err.(es.SubscriptionError); ok {
			// HTTP error response: recoverability depends on the status code (e.g. 401 is fatal).
			errorInfo := interfaces.DataSourceErrorInfo{
				Kind:       interfaces.DataSourceErrorKindErrorResponse,
				StatusCode: se.Code,
				Time:       time.Now(),
			}
			recoverable := checkIfErrorIsRecoverableAndLog(
				sp.loggers,
				httpErrorDescription(se.Code),
				streamingErrorContext,
				se.Code,
				streamingWillRetryMessage,
			)
			if recoverable {
				// A retry will follow, so start timing the next connection attempt.
				sp.logConnectionStarted()
				sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo)
				return es.StreamErrorHandlerResult{CloseNow: false}
			}
			// Unrecoverable (e.g. 401): shut the stream down permanently.
			sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, errorInfo)
			return es.StreamErrorHandlerResult{CloseNow: true}
		}
		// Any non-HTTP error (network failure, read timeout) is treated as recoverable.
		checkIfErrorIsRecoverableAndLog(
			sp.loggers,
			err.Error(),
			streamingErrorContext,
			0,
			streamingWillRetryMessage,
		)
		errorInfo := interfaces.DataSourceErrorInfo{
			Kind:    interfaces.DataSourceErrorKindNetworkError,
			Message: err.Error(),
			Time:    time.Now(),
		}
		sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateInterrupted, errorInfo)
		sp.logConnectionStarted()
		return es.StreamErrorHandlerResult{CloseNow: false}
	}
	stream, err := es.SubscribeWithRequestAndOptions(req,
		es.StreamOptionHTTPClient(sp.client),
		es.StreamOptionReadTimeout(streamReadTimeout),
		es.StreamOptionInitialRetry(initialRetryDelay),
		es.StreamOptionUseBackoff(streamMaxRetryDelay),
		es.StreamOptionUseJitter(streamJitterRatio),
		es.StreamOptionRetryResetInterval(streamRetryResetInterval),
		es.StreamOptionErrorHandler(errorHandler),
		// Negative value: presumably means "retry the first connection indefinitely" — confirm
		// against the eventsource package documentation.
		es.StreamOptionCanRetryFirstConnection(-1),
		es.StreamOptionLogger(sp.loggers.ForLevel(ldlog.Info)),
	)
	if err != nil {
		// The error handler decided this was fatal (CloseNow), so initialization has
		// permanently failed; unblock the client's initialization wait (scenario 4).
		sp.logConnectionResult(false)
		close(closeWhenReady)
		return
	}
	sp.consumeStream(stream, closeWhenReady)
}
// setInitializedAndNotifyClient marks the data source as initialized (only when success is
// true, and only once) and unblocks the client's initialization wait by closing
// closeWhenReady exactly once, regardless of success.
func (sp *StreamProcessor) setInitializedAndNotifyClient(success bool, closeWhenReady chan<- struct{}) {
	if success {
		sp.setInitializedOnce.Do(func() {
			sp.isInitialized = true
			sp.loggers.Info("LaunchDarkly streaming is active")
		})
	}
	sp.readyOnce.Do(func() { close(closeWhenReady) })
}
// logConnectionStarted records the start time of a new connection attempt, for use by
// logConnectionResult when reporting stream-init timing to diagnostics.
func (sp *StreamProcessor) logConnectionStarted() {
	sp.connectionAttemptLock.Lock()
	sp.connectionAttemptStartTime = ldtime.UnixMillisNow()
	sp.connectionAttemptLock.Unlock()
}
// logConnectionResult reports the outcome of the current connection attempt to the
// diagnostics manager (if one exists), measuring the elapsed time since
// logConnectionStarted. The attempt start time is cleared so each attempt is reported once.
func (sp *StreamProcessor) logConnectionResult(success bool) {
	// Swap out the recorded start time under the lock; the diagnostics call itself is made
	// after releasing the lock.
	sp.connectionAttemptLock.Lock()
	previousStart := sp.connectionAttemptStartTime
	sp.connectionAttemptStartTime = 0
	sp.connectionAttemptLock.Unlock()

	if previousStart == 0 || sp.diagnosticsManager == nil {
		return
	}
	now := ldtime.UnixMillisNow()
	sp.diagnosticsManager.RecordStreamInit(now, !success, uint64(now-previousStart))
}
//nolint:golint,stylecheck // no doc comment for standard method
func (sp *StreamProcessor) Close() error {
	// closeOnce makes repeated Close calls safe no-ops.
	sp.closeOnce.Do(func() {
		// Signal consumeStream to shut down the stream and exit.
		close(sp.halt)
		if listener := sp.storeStatusCh; listener != nil {
			sp.dataSourceUpdates.GetDataStoreStatusProvider().RemoveStatusListener(listener)
		}
		sp.dataSourceUpdates.UpdateStatus(interfaces.DataSourceStateOff, interfaces.DataSourceErrorInfo{})
	})
	return nil
}
// GetBaseURI returns the configured streaming base URI, for testing.
// This accessor exists only so builder tests can verify configuration; it is not part of
// the DataSource interface used by other code.
func (sp *StreamProcessor) GetBaseURI() string {
	return sp.streamURI
}
// GetInitialReconnectDelay returns the configured reconnect delay, for testing.
// This accessor exists only so builder tests can verify configuration; it is not part of
// the DataSource interface used by other code.
func (sp *StreamProcessor) GetInitialReconnectDelay() time.Duration {
	return sp.initialReconnectDelay
}