-
Notifications
You must be signed in to change notification settings - Fork 1.3k
/
appendresult.go
230 lines (206 loc) · 6.89 KB
/
appendresult.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package managedwriter
import (
"context"
"fmt"
"cloud.google.com/go/bigquery/storage/apiv1/storagepb"
"github.com/googleapis/gax-go/v2/apierror"
grpcstatus "google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
)
// NoStreamOffset is a sentinel value for signalling we're not tracking
// stream offset (e.g. a default stream which allows simultaneous append streams).
//
// It is also the offset reported by GetResult/offset when the append failed,
// the context expired, or the backend supplied no offset in its response.
const NoStreamOffset int64 = -1
// AppendResult tracks the status of a batch of data rows.
type AppendResult struct {
	// rowData contains the serialized row data.
	rowData [][]byte
	// ready is closed exactly once (by pendingWrite.markDone) when the append
	// reaches a terminal state; accessors block on it via Ready().
	ready chan struct{}
	// if the append failed without a response, this will retain a reference to the error.
	err error
	// retains the original response.
	response *storagepb.AppendRowsResponse
	// retains the number of times this individual write was enqueued.
	totalAttempts int
}
// newAppendResult wraps the given serialized rows in a fresh AppendResult
// whose completion channel is allocated but not yet closed.
func newAppendResult(data [][]byte) *AppendResult {
	ar := new(AppendResult)
	ar.rowData = data
	ar.ready = make(chan struct{})
	return ar
}
// Ready blocks until the append request has reached a completed state,
// which may be a successful append or an error.
func (ar *AppendResult) Ready() <-chan struct{} {
	return ar.ready
}
// GetResult returns the optional offset of this row, as well as any error encountered while
// processing the append.
//
// This call blocks until the result is ready, or context is no longer valid.
func (ar *AppendResult) GetResult(ctx context.Context) (int64, error) {
	select {
	case <-ctx.Done():
		return NoStreamOffset, ctx.Err()
	case <-ar.Ready():
		// Result is terminal; fall through to read it.
	}
	// FullResponse returns immediately here since the result is ready; it
	// carries both the (possibly embedded) error and the full response.
	resp, err := ar.FullResponse(ctx)
	off := NoStreamOffset
	if resp != nil {
		if result := resp.GetAppendResult(); result != nil {
			if o := result.GetOffset(); o != nil {
				off = o.GetValue()
			}
		}
	}
	return off, err
}
// FullResponse returns the full content of the AppendRowsResponse, and any error encountered while
// processing the append.
//
// The AppendRowResponse may contain an embedded error. An embedded error in the response will be
// converted and returned as the error response, so this method may return both the
// AppendRowsResponse and an error.
//
// This call blocks until the result is ready, or context is no longer valid.
func (ar *AppendResult) FullResponse(ctx context.Context) (*storagepb.AppendRowsResponse, error) {
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-ar.Ready():
		// Result is terminal; fall through to read it.
	}
	// A transport-level error takes precedence; otherwise surface any error
	// embedded in the response itself.
	err := ar.err
	if err == nil && ar.response != nil {
		if st := ar.response.GetError(); st != nil {
			statusErr := grpcstatus.ErrorProto(st)
			// Provide an APIError if possible.
			if apiErr, ok := apierror.FromError(statusErr); ok {
				err = apiErr
			} else {
				err = statusErr
			}
		}
	}
	if ar.response == nil {
		return nil, err
	}
	// Clone so callers can't mutate the retained response.
	return proto.Clone(ar.response).(*storagepb.AppendRowsResponse), err
}
// offset reports the offset recorded in the append response, or NoStreamOffset
// when the context expired or the backend supplied no offset.
func (ar *AppendResult) offset(ctx context.Context) int64 {
	select {
	case <-ctx.Done():
		return NoStreamOffset
	case <-ar.Ready():
		// Result is terminal; fall through to read it.
	}
	if ar.response == nil {
		return NoStreamOffset
	}
	result := ar.response.GetAppendResult()
	if result == nil {
		return NoStreamOffset
	}
	if off := result.GetOffset(); off != nil {
		return off.GetValue()
	}
	return NoStreamOffset
}
// UpdatedSchema returns the updated schema for a table if supplied by the backend as part
// of the append response.
//
// This call blocks until the result is ready, or context is no longer valid.
func (ar *AppendResult) UpdatedSchema(ctx context.Context) (*storagepb.TableSchema, error) {
	select {
	case <-ctx.Done():
		// Wrap the context error rather than returning an opaque string so
		// callers can distinguish cancellation from deadline expiry with
		// errors.Is, consistent with GetResult and FullResponse.
		return nil, fmt.Errorf("context done: %w", ctx.Err())
	case <-ar.Ready():
		if ar.response != nil {
			if schema := ar.response.GetUpdatedSchema(); schema != nil {
				// Clone so callers can't mutate the retained response.
				return proto.Clone(schema).(*storagepb.TableSchema), nil
			}
		}
		// No schema update was present; this is not an error.
		return nil, nil
	}
}
// TotalAttempts returns the number of times this write was attempted.
//
// This call blocks until the result is ready, or context is no longer valid.
func (ar *AppendResult) TotalAttempts(ctx context.Context) (int, error) {
	select {
	case <-ctx.Done():
		// Wrap the context error rather than returning an opaque string so
		// callers can distinguish cancellation from deadline expiry with
		// errors.Is, consistent with GetResult and FullResponse.
		return 0, fmt.Errorf("context done: %w", ctx.Err())
	case <-ar.Ready():
		return ar.totalAttempts, nil
	}
}
// pendingWrite tracks state for a set of rows that are part of a single
// append request.
type pendingWrite struct {
	// writer retains a reference to the origin of a pending write, primarily
	// used to inform routing decisions.
	writer *ManagedStream
	// request holds the full append request; markDone clears it once the
	// write reaches a terminal state so its memory can be reclaimed.
	request *storagepb.AppendRowsRequest
	// for schema evolution cases, accept a new schema
	newSchema *descriptorpb.DescriptorProto
	// result is surfaced to the caller and finalized via markDone.
	result *AppendResult
	// this is used by the flow controller (computed via proto.Size at
	// construction; released back when the write completes).
	reqSize int
	// retains the original request context, primarily for checking against
	// cancellation signals.
	reqCtx context.Context
	// tracks the number of times we've attempted this append request.
	attemptCount int
}
// newPendingWrite constructs the proto request and attaches references
// to the pending results for later consumption. The provided context is
// embedded in the pending write, as the write may be retried and we want
// to respect the original context for expiry/cancellation etc.
func newPendingWrite(ctx context.Context, appends [][]byte) *pendingWrite {
	// Build the nested proto payload from the inside out.
	rows := &storagepb.ProtoRows{
		SerializedRows: appends,
	}
	req := &storagepb.AppendRowsRequest{
		Rows: &storagepb.AppendRowsRequest_ProtoRows{
			ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
				Rows: rows,
			},
		},
	}
	pw := &pendingWrite{
		request: req,
		result:  newAppendResult(appends),
		reqCtx:  ctx,
	}
	// The size is computed now for flow controller purposes, though the
	// actual request on the wire may be slightly larger (e.g. the first
	// request in a new stream bears schema and stream id).
	pw.reqSize = proto.Size(pw.request)
	return pw
}
// markDone propagates finalization of an append request to the associated
// AppendResult.
func (pw *pendingWrite) markDone(resp *storagepb.AppendRowsResponse, err error, fc *flowController) {
	pw.result.err = err
	if resp != nil {
		pw.result.response = resp
	}
	// Surface the final attempt count to the user.
	pw.result.totalAttempts = pw.attemptCount
	// Unblock everyone waiting on Ready().
	close(pw.result.ready)
	// Drop the request reference so its memory can be reclaimed.
	pw.request = nil
	// Release flow control capacity when a controller is attached. The only
	// time fc should be nil is when flow control failed while enqueuing the
	// initial request.
	if fc != nil {
		fc.release(pw.reqSize)
	}
}