reporter.go
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package audit

import (
	"context"
	"strings"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/storj"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/reputation"
)
// reporter records audit reports in overlay and implements the Reporter interface.
//
// architecture: Service
type reporter struct {
	log              *zap.Logger
	reputations      *reputation.Service
	overlay          *overlay.Service
	metabase         *metabase.DB
	containment      Containment
	maxRetries       int
	maxReverifyCount int32
}

// Reporter records audit reports in the overlay and database.
type Reporter interface {
	RecordAudits(ctx context.Context, req Report)
	ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error)
	RecordReverificationResult(ctx context.Context, pendingJob *ReverificationJob, outcome Outcome, reputation overlay.ReputationStatus) (err error)
}

// Report contains audit results.
// It records whether an audit was able to be completed, the total number of
// pieces the audit covered, lists of nodes that succeeded, failed, were
// offline, have pending audits, or failed for unknown reasons, and the
// current reputation status of those nodes.
type Report struct {
	Segment         *metabase.Segment
	Successes       storj.NodeIDList
	Fails           metabase.Pieces
	Offlines        storj.NodeIDList
	PendingAudits   []*ReverificationJob
	Unknown         storj.NodeIDList
	NodesReputation map[storj.NodeID]overlay.ReputationStatus
}

// NewReporter instantiates a reporter.
func NewReporter(log *zap.Logger, reputations *reputation.Service, overlay *overlay.Service, metabase *metabase.DB, containment Containment, maxRetries int, maxReverifyCount int32) Reporter {
	return &reporter{
		log:              log,
		reputations:      reputations,
		overlay:          overlay,
		metabase:         metabase,
		containment:      containment,
		maxRetries:       maxRetries,
		maxReverifyCount: maxReverifyCount,
	}
}
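
// A minimal usage sketch (hypothetical wiring; the concrete services and node
// values come from the satellite peer setup, not from this file):
//
//	rep := NewReporter(log, reputationService, overlayService, metabaseDB, containment, 3, 3)
//	rep.RecordAudits(ctx, Report{
//		Successes:       storj.NodeIDList{nodeID},
//		NodesReputation: map[storj.NodeID]overlay.ReputationStatus{nodeID: repStatus},
//	})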

// RecordAudits saves audit results, applying reputation changes as appropriate.
// If some records cannot be updated after a number of attempts, the failures
// are logged at level ERROR, but are otherwise thrown away.
func (reporter *reporter) RecordAudits(ctx context.Context, req Report) {
	defer mon.Task()(&ctx)(nil)

	successes := req.Successes
	fails := req.Fails
	unknowns := req.Unknown
	offlines := req.Offlines
	pendingAudits := req.PendingAudits

	logger := reporter.log
	if req.Segment != nil {
		logger = logger.With(zap.Stringer("stream ID", req.Segment.StreamID), zap.Uint64("position", req.Segment.Position.Encode()))
	}
	logger.Debug("Reporting audits",
		zap.Int("successes", len(successes)),
		zap.Int("failures", len(fails)),
		zap.Int("unknowns", len(unknowns)),
		zap.Int("offlines", len(offlines)),
		zap.Int("pending", len(pendingAudits)),
	)

	nodesReputation := req.NodesReputation

	reportFailures := func(tries int, resultType string, err error, nodes storj.NodeIDList, pending []*ReverificationJob) {
		if err == nil || tries < reporter.maxRetries {
			// don't need to report anything until the last time through
			return
		}
		reporter.log.Error("failed to update reputation information with audit results",
			zap.String("result type", resultType),
			zap.Error(err),
			zap.String("node IDs", strings.Join(nodes.Strings(), ", ")),
			zap.Any("pending segment audits", pending))
	}

	var err error
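	// Each record* call below returns only the entries it failed to record, so
	// every pass through the loop retries a shrinking set until all lists are
	// empty or maxRetries is exhausted.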
	for tries := 0; tries <= reporter.maxRetries; tries++ {
		if len(successes) == 0 && len(fails) == 0 && len(unknowns) == 0 && len(offlines) == 0 && len(pendingAudits) == 0 {
			return
		}

		successes, err = reporter.recordAuditStatus(ctx, successes, nodesReputation, reputation.AuditSuccess)
		reportFailures(tries, "successful", err, successes, nil)
		fails, err = reporter.recordFailedAudits(ctx, req.Segment, fails, nodesReputation)
		reportFailures(tries, "failed", err, nil, nil)
		unknowns, err = reporter.recordAuditStatus(ctx, unknowns, nodesReputation, reputation.AuditUnknown)
		reportFailures(tries, "unknown", err, unknowns, nil)
		offlines, err = reporter.recordAuditStatus(ctx, offlines, nodesReputation, reputation.AuditOffline)
		reportFailures(tries, "offline", err, offlines, nil)
		pendingAudits, err = reporter.recordPendingAudits(ctx, pendingAudits, nodesReputation)
		reportFailures(tries, "pending", err, nil, pendingAudits)
	}
}
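
// recordAuditStatus applies the given audit outcome to the reputation of each
// listed node and returns the node IDs whose updates failed, so the caller can
// retry them.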
func (reporter *reporter) recordAuditStatus(ctx context.Context, nodeIDs storj.NodeIDList, nodesReputation map[storj.NodeID]overlay.ReputationStatus, auditOutcome reputation.AuditType) (failed storj.NodeIDList, err error) {
	defer mon.Task()(&ctx)(&err)

	if len(nodeIDs) == 0 {
		return nil, nil
	}
	var errors errs.Group
	for _, nodeID := range nodeIDs {
		err = reporter.reputations.ApplyAudit(ctx, nodeID, nodesReputation[nodeID], auditOutcome)
		if err != nil {
			failed = append(failed, nodeID)
			errors.Add(Error.New("failed to record audit status %s in overlay for node %s: %w", auditOutcome.String(), nodeID, err))
		}
	}
	return failed, errors.Err()
}

// recordPendingAudits updates the containment status of nodes with pending piece audits.
func (reporter *reporter) recordPendingAudits(ctx context.Context, pendingAudits []*ReverificationJob, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failed []*ReverificationJob, err error) {
	defer mon.Task()(&ctx)(&err)

	var errlist errs.Group
	for _, pendingAudit := range pendingAudits {
		logger := reporter.log.With(
			zap.Stringer("Node ID", pendingAudit.Locator.NodeID),
			zap.Stringer("Stream ID", pendingAudit.Locator.StreamID),
			zap.Uint64("Position", pendingAudit.Locator.Position.Encode()),
			zap.Int("Piece Num", pendingAudit.Locator.PieceNum))

		if pendingAudit.ReverifyCount < int(reporter.maxReverifyCount) {
			err := reporter.ReportReverificationNeeded(ctx, &pendingAudit.Locator)
			if err != nil {
				failed = append(failed, pendingAudit)
				errlist.Add(err)
				continue
			}
			logger.Info("reverification queued")
			continue
		}
		// record failure -- max reverify count reached
		logger.Info("max reverify count reached (audit failed)")
		err = reporter.reputations.ApplyAudit(ctx, pendingAudit.Locator.NodeID, nodesReputation[pendingAudit.Locator.NodeID], reputation.AuditFailure)
		if err != nil {
			logger.Info("failed to update reputation information", zap.Error(err))
			errlist.Add(err)
			failed = append(failed, pendingAudit)
			continue
		}
		_, stillContained, err := reporter.containment.Delete(ctx, &pendingAudit.Locator)
		if err != nil {
			if !ErrContainedNotFound.Has(err) {
				errlist.Add(err)
			}
			continue
		}
		if !stillContained {
			err = reporter.overlay.SetNodeContained(ctx, pendingAudit.Locator.NodeID, false)
			if err != nil {
				logger.Error("failed to mark node as not contained", zap.Error(err))
			}
		}
	}

	if len(failed) > 0 {
		return failed, errs.Combine(Error.New("failed to record some pending audits"), errlist.Err())
	}
	return nil, nil
}
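
// maxPiecesToRemoveAtOnce caps how many pieces a single failed audit may drop
// from a segment; see the safety check in recordFailedAudits.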
const maxPiecesToRemoveAtOnce = 6

// recordFailedAudits performs reporting and response to hard-failed audits. Failed audits generally
// mean the piece is gone. Remove the pieces from the relevant pointers so that the segment can be
// repaired if appropriate, and so that we don't continually dock reputation for the same missing
// piece(s).
func (reporter *reporter) recordFailedAudits(ctx context.Context, segment *metabase.Segment, failures []metabase.Piece, nodesReputation map[storj.NodeID]overlay.ReputationStatus) (failedToRecord []metabase.Piece, err error) {
	defer mon.Task()(&ctx)(&err)

	piecesToRemove := make(metabase.Pieces, 0, len(failures))
	var errors errs.Group
	for _, f := range failures {
		err = reporter.reputations.ApplyAudit(ctx, f.StorageNode, nodesReputation[f.StorageNode], reputation.AuditFailure)
		if err != nil {
			failedToRecord = append(failedToRecord, f)
			errors.Add(Error.New("failed to record audit failure in overlay for node %s: %w", f.StorageNode, err))
		}
		piecesToRemove = append(piecesToRemove, f)
	}

	if segment != nil {
		// Safety check. If, say, 30 pieces all started having audit failures at the same time, the
		// problem is more likely with the audit system itself and not with the pieces.
		if len(piecesToRemove) > maxPiecesToRemoveAtOnce {
			reporter.log.Error("cowardly refusing to remove large number of pieces for failed audit",
				zap.Int("piecesToRemove", len(piecesToRemove)),
				zap.Int("threshold", maxPiecesToRemoveAtOnce))
			return failedToRecord, errors.Err()
		}
		pieces, err := segment.Pieces.Remove(piecesToRemove)
		if err != nil {
			errors.Add(err)
			return failedToRecord, errors.Err()
		}
		errors.Add(reporter.metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
			StreamID:      segment.StreamID,
			Position:      segment.Position,
			OldPieces:     segment.Pieces,
			NewRedundancy: segment.Redundancy,
			NewPieces:     pieces,
		}))
	}

	return failedToRecord, errors.Err()
}
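
// ReportReverificationNeeded queues a reverification audit for the given piece
// and marks its node as contained in the overlay.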
func (reporter *reporter) ReportReverificationNeeded(ctx context.Context, piece *PieceLocator) (err error) {
	defer mon.Task()(&ctx)(&err)

	err = reporter.containment.Insert(ctx, piece)
	if err != nil {
		return Error.New("failed to queue reverification audit for node: %w", err)
	}

	err = reporter.overlay.SetNodeContained(ctx, piece.NodeID, true)
	if err != nil {
		return Error.New("failed to update contained status: %w", err)
	}
	return nil
}
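
// RecordReverificationResult applies the reputation and containment changes
// implied by the outcome of a completed (or abandoned) piece reverification.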
func (reporter *reporter) RecordReverificationResult(ctx context.Context, pendingJob *ReverificationJob, outcome Outcome, reputation overlay.ReputationStatus) (err error) {
	defer mon.Task()(&ctx)(&err)

	keepInQueue := true
	report := Report{
		NodesReputation: map[storj.NodeID]overlay.ReputationStatus{
			pendingJob.Locator.NodeID: reputation,
		},
	}
	switch outcome {
	case OutcomeNotPerformed:
	case OutcomeNotNecessary:
		keepInQueue = false
	case OutcomeSuccess:
		report.Successes = append(report.Successes, pendingJob.Locator.NodeID)
		keepInQueue = false
	case OutcomeFailure:
		// We have to look up the segment metainfo and pass it on to RecordAudits so that
		// the segment can be modified (removing this piece). We don't persist this
		// information through the reverification queue.
		segmentInfo, err := reporter.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
			StreamID: pendingJob.Locator.StreamID,
			Position: pendingJob.Locator.Position,
		})
		if err != nil {
			reporter.log.Error("could not look up segment after audit reverification",
				zap.Stringer("stream ID", pendingJob.Locator.StreamID),
				zap.Uint64("position", pendingJob.Locator.Position.Encode()),
				zap.Error(err),
			)
		} else {
			report.Segment = &segmentInfo
		}
		report.Fails = append(report.Fails, metabase.Piece{
			StorageNode: pendingJob.Locator.NodeID,
			Number:      uint16(pendingJob.Locator.PieceNum),
		})
		keepInQueue = false
	case OutcomeTimedOut:
		// This will get re-added to the reverification queue, but that is idempotent
		// and fine. We do need to add it to PendingAudits in order to get the
		// maxReverifyCount check.
		report.PendingAudits = append(report.PendingAudits, pendingJob)
	case OutcomeUnknownError:
		report.Unknown = append(report.Unknown, pendingJob.Locator.NodeID)
		keepInQueue = false
	case OutcomeNodeOffline:
		report.Offlines = append(report.Offlines, pendingJob.Locator.NodeID)
	}

	var errList errs.Group

	// apply any necessary reputation changes
	reporter.RecordAudits(ctx, report)

	// remove from reverifications queue if appropriate
	if !keepInQueue {
		_, stillContained, err := reporter.containment.Delete(ctx, &pendingJob.Locator)
		if err != nil {
			if !ErrContainedNotFound.Has(err) {
				errList.Add(err)
			}
		} else if !stillContained {
			err = reporter.overlay.SetNodeContained(ctx, pendingJob.Locator.NodeID, false)
			errList.Add(err)
		}
	}

	return errList.Err()
}
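
// A caller sketch (hypothetical; in practice the reverification worker invokes
// this after re-checking a contained piece, with `status` fetched from the
// overlay):
//
//	if err := rep.RecordReverificationResult(ctx, pendingJob, OutcomeSuccess, status); err != nil {
//		log.Error("failed to record reverification result", zap.Error(err))
//	}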