connection.go
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package managedwriter
import (
"context"
"errors"
"fmt"
"io"
"sync"
"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
"github.com/googleapis/gax-go/v2"
"go.opencensus.io/tag"
"google.golang.org/grpc/codes"
grpcstatus "google.golang.org/grpc/status"
)
const (
poolIDPrefix string = "connectionpool"
connIDPrefix string = "connection"
writerIDPrefix string = "writer"
)
var (
errNoRouterForPool = errors.New("no router for connection pool")
)
// connectionPool represents a pooled set of connections.
//
// The pool retains references to connections, and maintains the mapping between writers
// and connections.
type connectionPool struct {
id string
location string // BQ region associated with this pool.
// the pool retains the long-lived context responsible for opening/maintaining bidi connections.
ctx context.Context
cancel context.CancelFunc
baseFlowController *flowController // template flow controller used for building connections.
// We centralize the open function on the pool, rather than having an instance of the open func on every
// connection. Opening the connection is a stateless operation.
open func(opts ...gax.CallOption) (storagepb.BigQueryWrite_AppendRowsClient, error)
// We specify one set of calloptions for the pool.
// All connections in the pool open with the same call options.
callOptions []gax.CallOption
router poolRouter // the router makes the decisions about connections and routing.
retry *statelessRetryer // default retryer for the pool.
}
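// A minimal wiring sketch (assumed usage; pool construction details live
// elsewhere in this package). The rtr and writer values are hypothetical:
//
//	if err := pool.activateRouter(rtr); err != nil {
//		// the router rejected the attach.
//	}
//	if err := pool.addWriter(writer); err != nil {
//		// the writer could not be attached.
//	}
//	defer pool.Close()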
// activateRouter handles wiring up a connection pool and its router.
func (pool *connectionPool) activateRouter(rtr poolRouter) error {
if pool.router != nil {
return fmt.Errorf("router already activated")
}
if err := rtr.poolAttach(pool); err != nil {
return fmt.Errorf("router rejected attach: %w", err)
}
pool.router = rtr
return nil
}
func (pool *connectionPool) Close() error {
// Signal router and cancel context, which should propagate to all writers.
var err error
if pool.router != nil {
err = pool.router.poolDetach()
}
if cancel := pool.cancel; cancel != nil {
cancel()
}
return err
}
// selectConn is used by writers to select a connection; it delegates to the pool's router.
func (pool *connectionPool) selectConn(pw *pendingWrite) (*connection, error) {
if pool.router == nil {
return nil, errNoRouterForPool
}
return pool.router.pickConnection(pw)
}
func (pool *connectionPool) addWriter(writer *ManagedStream) error {
if p := writer.pool; p != nil {
return fmt.Errorf("writer already attached to pool %q", p.id)
}
if pool.router == nil {
return errNoRouterForPool
}
if err := pool.router.writerAttach(writer); err != nil {
return err
}
writer.pool = pool
return nil
}
func (pool *connectionPool) removeWriter(writer *ManagedStream) error {
if pool.router == nil {
return errNoRouterForPool
}
detachErr := pool.router.writerDetach(writer)
return detachErr
}
// openWithRetry establishes a new bidi stream and channel pair. It is used by connection objects
// when (re)opening the network connection to the backend.
//
// The connection.getStream() func should be the only consumer of this.
func (cp *connectionPool) openWithRetry(co *connection) (storagepb.BigQueryWrite_AppendRowsClient, chan *pendingWrite, error) {
r := &unaryRetryer{}
for {
recordStat(cp.ctx, AppendClientOpenCount, 1)
arc, err := cp.open(cp.callOptions...)
if err != nil {
bo, shouldRetry := r.Retry(err)
if shouldRetry {
recordStat(cp.ctx, AppendClientOpenRetryCount, 1)
if err := gax.Sleep(cp.ctx, bo); err != nil {
return nil, nil, err
}
continue
} else {
// non-retriable error while opening
return nil, nil, err
}
}
// The channel relationship with its ARC is 1:1. If we get a new ARC, create a new pending
// write channel and fire up the associated receive processor. The channel ensures that
// responses for a connection are processed in the same order that appends were sent.
depth := 1000 // default backend queue limit
if d := co.fc.maxInsertCount; d > 0 {
depth = d
}
ch := make(chan *pendingWrite, depth)
go connRecvProcessor(co, arc, ch)
return arc, ch, nil
}
}
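// For example, a connection whose flow controller was configured with
// maxInsertCount=500 gets a pending-write channel of depth 500, while a
// connection with no insert-count limit (0) falls back to the default depth
// of 1000.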
// defaultRetryer returns the pool's stateless retryer. If none is set (re-enqueue
// retries disabled), it returns a retryer that permits only a single attempt.
func (cp *connectionPool) defaultRetryer() *statelessRetryer {
if cp.retry != nil {
return cp.retry
}
return &statelessRetryer{
maxAttempts: 1,
}
}
// connection models the underlying AppendRows grpc bidi connection used for writing
// data and receiving acknowledgements. It is responsible for enqueuing writes and processing
// responses from the backend.
type connection struct {
id string
pool *connectionPool // each connection retains a reference to its owning pool.
fc *flowController // each connection has its own flow controller.
ctx context.Context // retained context for maintaining the connection, derived from the owning pool.
cancel context.CancelFunc
retry *statelessRetryer
optimizer sendOptimizer
mu sync.Mutex
arc *storagepb.BigQueryWrite_AppendRowsClient // reference to the grpc connection (send, recv, close)
reconnect bool // if set, the next call to getStream forces a reconnect.
err error // terminal connection error
pending chan *pendingWrite
loadBytesThreshold int
loadCountThreshold int
}
type connectionMode string
const (
multiplexConnectionMode connectionMode = "MULTIPLEX"
simplexConnectionMode connectionMode = "SIMPLEX"
verboseConnectionMode connectionMode = "VERBOSE"
)
func newConnection(pool *connectionPool, mode connectionMode) *connection {
if pool == nil {
return nil
}
// create and retain a cancellable context.
connCtx, cancel := context.WithCancel(pool.ctx)
fc := copyFlowController(pool.baseFlowController)
countLimit, byteLimit := computeLoadThresholds(fc)
return &connection{
id: newUUID(connIDPrefix),
pool: pool,
fc: fc,
ctx: connCtx,
cancel: cancel,
optimizer: optimizer(mode),
loadBytesThreshold: byteLimit,
loadCountThreshold: countLimit,
}
}
func computeLoadThresholds(fc *flowController) (countLimit, byteLimit int) {
countLimit = 1000
byteLimit = 0
if fc != nil {
if fc.maxInsertBytes > 0 {
// 20% of byte limit
byteLimit = int(float64(fc.maxInsertBytes) * 0.2)
}
if fc.maxInsertCount > 0 {
// MAX(1, 20% of insert limit)
countLimit = int(float64(fc.maxInsertCount) * 0.2)
if countLimit < 1 {
countLimit = 1
}
}
}
return
}
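// Worked example: a flow controller configured with maxInsertCount=1000 and
// maxInsertBytes=10*1024*1024 yields countLimit=200 (20% of 1000) and
// byteLimit=2097152 (20% of 10 MiB).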
func optimizer(mode connectionMode) sendOptimizer {
switch mode {
case multiplexConnectionMode:
return &multiplexOptimizer{}
case verboseConnectionMode:
return &verboseOptimizer{}
case simplexConnectionMode:
return &simplexOptimizer{}
}
return nil
}
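// For example, newConnection(pool, multiplexConnectionMode) retains a
// multiplexOptimizer, while an unrecognized mode yields no optimizer and
// lockingAppend falls back to sending fully populated requests.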
// release is used to signal flow control release when a write is no longer in flight.
func (co *connection) release(pw *pendingWrite) {
co.fc.release(pw.reqSize)
}
// isLoaded signals whether multiplex traffic on this connection is high enough to warrant adding more connections.
func (co *connection) isLoaded() bool {
if co.loadCountThreshold > 0 && co.fc.count() > co.loadCountThreshold {
return true
}
if co.loadBytesThreshold > 0 && co.fc.bytes() > co.loadBytesThreshold {
return true
}
return false
}
// curLoad is a representation of connection load.
// Its primary purpose is comparing the load of different connections.
func (co *connection) curLoad() float64 {
load := float64(co.fc.count()) / float64(co.loadCountThreshold+1)
if co.fc.maxInsertBytes > 0 {
load += (float64(co.fc.bytes()) / float64(co.loadBytesThreshold+1))
load = load / 2
}
return load
}
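// Worked example (thresholds from the sketch above): with
// loadCountThreshold=200, loadBytesThreshold=2097152, 50 writes in flight
// totalling 1 MiB, and a byte limit set:
//
//	load  = 50/201          ≈ 0.249
//	load += 1048576/2097153 ≈ 0.500 (running total ≈ 0.749)
//	load /= 2               ≈ 0.375
//
// The +1 in each denominator guards against division by zero.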
// close closes a connection.
func (co *connection) close() {
co.mu.Lock()
defer co.mu.Unlock()
// first, cancel the retained context.
if co.cancel != nil {
co.cancel()
co.cancel = nil
}
// close sending if we have a real ARC.
if co.arc != nil && (*co.arc) != (storagepb.BigQueryWrite_AppendRowsClient)(nil) {
(*co.arc).CloseSend()
co.arc = nil
}
// mark terminal error if not already set.
if co.err == nil {
co.err = io.EOF
}
// signal pending channel close.
if co.pending != nil {
close(co.pending)
}
}
// lockingAppend handles a single append request on a given connection.
func (co *connection) lockingAppend(pw *pendingWrite) error {
// Don't bother calling/retrying if this append's context is already expired.
if err := pw.reqCtx.Err(); err != nil {
return err
}
if err := co.fc.acquire(pw.reqCtx, pw.reqSize); err != nil {
// We've failed to acquire. This may get retried on a different connection, so marking the write done is incorrect.
return err
}
var statsOnExit func()
// Critical section. Things that need to happen inside the critical section:
//
// * get/open the connection
// * issue the append
// * add the pending write to the connection's channel (ordering for the response)
co.mu.Lock()
defer func() {
co.mu.Unlock()
if statsOnExit != nil {
statsOnExit()
}
}()
var arc *storagepb.BigQueryWrite_AppendRowsClient
var ch chan *pendingWrite
var err error
// We still need to reconnect when signaling a new schema for explicit streams.
// Rather than adding more state to the connection, we just look at the request as we
// do not allow multiplexing to include explicit streams.
forceReconnect := false
if !canMultiplex(pw.writeStreamID) {
if pw.writer != nil && pw.descVersion != nil && pw.descVersion.isNewer(pw.writer.curDescVersion) {
forceReconnect = true
pw.writer.curDescVersion = pw.descVersion
}
}
arc, ch, err = co.getStream(arc, forceReconnect)
if err != nil {
return err
}
pw.attemptCount++
if co.optimizer != nil {
err = co.optimizer.optimizeSend((*arc), pw)
if err != nil {
// Reset optimizer state on error.
co.optimizer.signalReset()
}
} else {
// No optimizer present, send a fully populated request.
err = (*arc).Send(pw.constructFullRequest(true))
}
if err != nil {
if shouldReconnect(err) {
// if we think this connection is unhealthy, force a reconnect on the next send.
co.reconnect = true
}
return err
}
// Compute numRows now; once ownership passes to the channel, the request may be
// cleared.
var numRows int64
if r := pw.req.GetProtoRows(); r != nil {
if pr := r.GetRows(); pr != nil {
numRows = int64(len(pr.GetSerializedRows()))
}
}
statsOnExit = func() {
// these will get recorded once we exit the critical section.
// TODO: resolve open questions around what labels should be attached (connection, streamID, etc)
recordStat(co.ctx, AppendRequestRows, numRows)
recordStat(co.ctx, AppendRequests, 1)
recordStat(co.ctx, AppendRequestBytes, int64(pw.reqSize))
}
ch <- pw
return nil
}
// getStream returns either a valid ARC client stream or a permanent error.
//
// Any caller of getStream should hold the critical section lock.
func (co *connection) getStream(arc *storagepb.BigQueryWrite_AppendRowsClient, forceReconnect bool) (*storagepb.BigQueryWrite_AppendRowsClient, chan *pendingWrite, error) {
if co.err != nil {
return nil, nil, co.err
}
co.err = co.ctx.Err()
if co.err != nil {
return nil, nil, co.err
}
// Previous activity on the stream indicated it is not healthy, so propagate that as a reconnect.
if co.reconnect {
forceReconnect = true
co.reconnect = false
}
// Return the retained ARC if the caller's ARC is stale (differs) and no reconnect is forced.
if arc != co.arc && !forceReconnect {
return co.arc, co.pending, nil
}
// We need to (re)open a connection. Clean up the previous connection and channel if present.
if co.arc != nil && (*co.arc) != (storagepb.BigQueryWrite_AppendRowsClient)(nil) {
(*co.arc).CloseSend()
}
if co.pending != nil {
close(co.pending)
}
co.arc = new(storagepb.BigQueryWrite_AppendRowsClient)
// We're going to (re)open the connection, so clear any optimizer state.
if co.optimizer != nil {
co.optimizer.signalReset()
}
*co.arc, co.pending, co.err = co.pool.openWithRetry(co)
return co.arc, co.pending, co.err
}
// streamClientFunc matches the signature used to open an AppendRows stream; it exists to enable testing.
type streamClientFunc func(context.Context, ...gax.CallOption) (storagepb.BigQueryWrite_AppendRowsClient, error)
// connRecvProcessor propagates append responses back to the originating write requests.
// It runs as a goroutine. A connection object allows for reconnection, and each reconnection
// establishes a new processing goroutine and backing channel.
func connRecvProcessor(co *connection, arc storagepb.BigQueryWrite_AppendRowsClient, ch <-chan *pendingWrite) {
for {
select {
case <-co.ctx.Done():
// Context is done, so we're not going to get further updates. Mark all work left in the channel
// with the context error. We don't attempt to re-enqueue in this case.
for {
pw, ok := <-ch
if !ok {
return
}
// It's unlikely this connection will recover here, but release anyway to keep the
// flow controller state consistent.
co.release(pw)
pw.markDone(nil, co.ctx.Err())
}
case nextWrite, ok := <-ch:
if !ok {
// Channel closed, all elements processed.
return
}
// block until we get a corresponding response or err from stream.
resp, err := arc.Recv()
co.release(nextWrite)
if err != nil {
nextWrite.writer.processRetry(nextWrite, co, nil, err)
continue
}
// Record that we did in fact get a response from the backend.
recordStat(co.ctx, AppendResponses, 1)
if status := resp.GetError(); status != nil {
// The response from the backend embedded a status error. We record that the error
// occurred, and tag it based on the response code of the status.
if tagCtx, tagErr := tag.New(co.ctx, tag.Insert(keyError, codes.Code(status.GetCode()).String())); tagErr == nil {
recordStat(tagCtx, AppendResponseErrors, 1)
}
respErr := grpcstatus.ErrorProto(status)
nextWrite.writer.processRetry(nextWrite, co, resp, respErr)
continue
}
// We had no error in the receive or in the response. Mark the write done.
nextWrite.markDone(resp, nil)
}
}
}