-
Notifications
You must be signed in to change notification settings - Fork 451
/
cleanup.go
467 lines (408 loc) · 17.1 KB
/
cleanup.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package storage
import (
	"fmt"
	"sort"
	"strconv"
	"sync"
	"time"

	"github.com/m3db/m3/src/dbnode/clock"
	"github.com/m3db/m3/src/dbnode/persist"
	"github.com/m3db/m3/src/dbnode/persist/fs"
	"github.com/m3db/m3/src/dbnode/persist/fs/commitlog"
	"github.com/m3db/m3/src/dbnode/retention"
	xerrors "github.com/m3db/m3x/errors"
	"github.com/m3db/m3x/ident"
	xlog "github.com/m3db/m3x/log"
	"github.com/pborman/uuid"
	"github.com/uber-go/tally"
)
// commitLogFilesFn lists the commitlog files on disk, returning the readable
// files plus any files that could not be read (with their errors).
type commitLogFilesFn func(commitlog.Options) (persist.CommitLogFiles, []commitlog.ErrorWithPath, error)

// snapshotMetadataFilesFn lists snapshot metadata files on disk, returning the
// readable metadatas plus any corrupt entries (with their file paths).
type snapshotMetadataFilesFn func(fs.Options) ([]fs.SnapshotMetadata, []fs.SnapshotMetadataErrorWithPaths, error)

// snapshotFilesFn lists the snapshot fileset files for a single
// namespace/shard combination under the given path prefix.
type snapshotFilesFn func(filePathPrefix string, namespace ident.ID, shard uint32) (fs.FileSetFilesSlice, error)

// deleteFilesFn deletes the given file paths from disk.
type deleteFilesFn func(files []string) error

// deleteInactiveDirectoriesFn deletes every child directory of parentDirPath
// whose name is not listed in activeDirNames.
type deleteInactiveDirectoriesFn func(parentDirPath string, activeDirNames []string) error
// Narrow interface so as not to expose all the functionality of the commitlog
// to the cleanup manager.
type activeCommitlogs interface {
	// ActiveLogs returns the commitlog files that are currently being
	// actively written to (and must therefore never be deleted).
	ActiveLogs() (persist.CommitLogFiles, error)
}
// cleanupManager deletes expired, inactive, and corrupt on-disk files
// (filesets, snapshots, snapshot metadata, and commitlogs) for the database.
type cleanupManager struct {
	sync.RWMutex // guards cleanupInProgress
	database     database
	// activeCommitlogs reports which commitlog files are being written to so
	// they can be skipped during deletion.
	activeCommitlogs activeCommitlogs
	opts             Options
	nowFn            clock.NowFn
	// filePathPrefix and commitLogsDir are derived once at construction from
	// the commitlog filesystem options.
	filePathPrefix string
	commitLogsDir  string
	// The fn fields below default to the real fs/commitlog implementations
	// (see newCleanupManager) and exist as seams for tests.
	commitLogFilesFn            commitLogFilesFn
	snapshotMetadataFilesFn     snapshotMetadataFilesFn
	snapshotFilesFn             snapshotFilesFn
	deleteFilesFn               deleteFilesFn
	deleteInactiveDirectoriesFn deleteInactiveDirectoriesFn
	// cleanupInProgress is true while Cleanup is running; exported as a
	// gauge by Report.
	cleanupInProgress bool
	metrics           cleanupManagerMetrics
}
// cleanupManagerMetrics holds the metrics emitted during cleanup runs.
type cleanupManagerMetrics struct {
	// status reads 1 while a cleanup pass is in progress, 0 otherwise.
	status tally.Gauge
	// corrupt* count files found unreadable/corrupt during cleanup;
	// deleted* count files marked for deletion.
	corruptCommitlogFile        tally.Counter
	corruptSnapshotFile         tally.Counter
	corruptSnapshotMetadataFile tally.Counter
	deletedCommitlogFile        tally.Counter
	deletedSnapshotFile         tally.Counter
	deletedSnapshotMetadataFile tally.Counter
}
func newCleanupManagerMetrics(scope tally.Scope) cleanupManagerMetrics {
clScope := scope.SubScope("commitlog")
sScope := scope.SubScope("snapshot")
smScope := scope.SubScope("snapshot-metadata")
return cleanupManagerMetrics{
status: scope.Gauge("cleanup"),
corruptCommitlogFile: clScope.Counter("corrupt"),
corruptSnapshotFile: sScope.Counter("corrupt"),
corruptSnapshotMetadataFile: smScope.Counter("corrupt"),
deletedCommitlogFile: clScope.Counter("deleted"),
deletedSnapshotFile: sScope.Counter("deleted"),
deletedSnapshotMetadataFile: smScope.Counter("deleted"),
}
}
// newCleanupManager constructs a databaseCleanupManager for the given
// database, wiring in the real filesystem/commitlog listing and deletion
// functions (overridable in tests via the manager's fn fields).
func newCleanupManager(
	database database, activeLogs activeCommitlogs, scope tally.Scope) databaseCleanupManager {
	dbOpts := database.Options()
	prefix := dbOpts.CommitLogOptions().FilesystemOptions().FilePathPrefix()

	mgr := &cleanupManager{
		database:         database,
		activeCommitlogs: activeLogs,

		opts:           dbOpts,
		nowFn:          dbOpts.ClockOptions().NowFn(),
		filePathPrefix: prefix,
		commitLogsDir:  fs.CommitLogsDirPath(prefix),

		commitLogFilesFn:            commitlog.Files,
		snapshotMetadataFilesFn:     fs.SortedSnapshotMetadataFiles,
		snapshotFilesFn:             fs.SnapshotFiles,
		deleteFilesFn:               fs.DeleteFiles,
		deleteInactiveDirectoriesFn: fs.DeleteInactiveDirectories,

		metrics: newCleanupManagerMetrics(scope),
	}
	return mgr
}
// Cleanup runs every cleanup task for time t sequentially, accumulating any
// failures into a single multi-error so that one failing task does not stop
// the others. While it runs, cleanupInProgress is set so Report can export
// the status gauge.
func (m *cleanupManager) Cleanup(t time.Time) error {
	m.Lock()
	m.cleanupInProgress = true
	m.Unlock()
	defer func() {
		m.Lock()
		m.cleanupInProgress = false
		m.Unlock()
	}()

	multiErr := xerrors.NewMultiError()
	// collect records a task failure without aborting the remaining tasks.
	collect := func(task string, err error) {
		if err != nil {
			multiErr = multiErr.Add(fmt.Errorf(
				"encountered errors when %s for %v: %v", task, t, err))
		}
	}

	collect("cleaning up data files", m.cleanupExpiredDataFiles(t))
	collect("cleaning up index files", m.cleanupExpiredIndexFiles(t))
	collect("deleting inactive data files", m.deleteInactiveDataFiles())
	collect("deleting inactive snapshot files", m.deleteInactiveDataSnapshotFiles())
	collect("deleting inactive namespace files", m.deleteInactiveNamespaceFiles())

	if err := m.cleanupSnapshotsAndCommitlogs(); err != nil {
		multiErr = multiErr.Add(fmt.Errorf(
			"encountered errors when cleaning up snapshot and commitlog files: %v", err))
	}

	return multiErr.FinalError()
}
// Report exports the cleanup status gauge: 1 while a cleanup pass is
// running, 0 otherwise.
func (m *cleanupManager) Report() {
	m.RLock()
	inProgress := m.cleanupInProgress
	m.RUnlock()

	var status float64
	if inProgress {
		status = 1
	}
	m.metrics.status.Update(status)
}
// deleteInactiveNamespaceFiles deletes the data directories of any namespace
// that this node does not currently own, keeping only the directories named
// after the owned namespaces.
func (m *cleanupManager) deleteInactiveNamespaceFiles() error {
	namespaces, err := m.database.GetOwnedNamespaces()
	if err != nil {
		return err
	}
	// Use the prefix cached at construction time instead of re-deriving it
	// from the options chain on every call (it comes from the same
	// CommitLogOptions().FilesystemOptions() source).
	dataDirPath := fs.DataDirPath(m.filePathPrefix)
	namespaceDirNames := make([]string, 0, len(namespaces))
	for _, n := range namespaces {
		namespaceDirNames = append(namespaceDirNames, n.ID().String())
	}
	return m.deleteInactiveDirectoriesFn(dataDirPath, namespaceDirNames)
}
// deleteInactiveDataFiles will delete data files for shards that the node no longer owns
// which can occur in the case of topology changes. It delegates to
// deleteInactiveDataFileSetFiles with the namespace data directory resolver.
func (m *cleanupManager) deleteInactiveDataFiles() error {
	return m.deleteInactiveDataFileSetFiles(fs.NamespaceDataDirPath)
}
// deleteInactiveDataSnapshotFiles will delete snapshot files for shards that the node no longer owns
// which can occur in the case of topology changes. It delegates to
// deleteInactiveDataFileSetFiles with the namespace snapshots directory resolver.
func (m *cleanupManager) deleteInactiveDataSnapshotFiles() error {
	return m.deleteInactiveDataFileSetFiles(fs.NamespaceSnapshotsDirPath)
}
// deleteInactiveDataFileSetFiles deletes, for every owned namespace, the
// subdirectories under the directory produced by filesetFilesDirPathFn
// (data or snapshots) that do not correspond to a shard the node owns.
func (m *cleanupManager) deleteInactiveDataFileSetFiles(filesetFilesDirPathFn func(string, ident.ID) string) error {
	multiErr := xerrors.NewMultiError()
	namespaces, err := m.database.GetOwnedNamespaces()
	if err != nil {
		return err
	}
	for _, n := range namespaces {
		// Use the prefix cached at construction time instead of re-deriving
		// it from the options chain on every call (it comes from the same
		// CommitLogOptions().FilesystemOptions() source).
		namespaceDirPath := filesetFilesDirPathFn(m.filePathPrefix, n.ID())
		shards := n.GetOwnedShards()
		activeShards := make([]string, 0, len(shards))
		for _, s := range shards {
			// Shard directories are named after the numeric shard ID.
			activeShards = append(activeShards, strconv.FormatUint(uint64(s.ID()), 10))
		}
		multiErr = multiErr.Add(m.deleteInactiveDirectoriesFn(namespaceDirPath, activeShards))
	}
	return multiErr.FinalError()
}
// cleanupExpiredDataFiles removes fileset data that has fallen outside the
// retention window (relative to t) for every owned namespace that has
// cleanup enabled.
func (m *cleanupManager) cleanupExpiredDataFiles(t time.Time) error {
	namespaces, err := m.database.GetOwnedNamespaces()
	if err != nil {
		return err
	}
	multiErr := xerrors.NewMultiError()
	for _, n := range namespaces {
		nsOpts := n.Options()
		if !nsOpts.CleanupEnabled() {
			continue
		}
		earliest := retention.FlushTimeStart(nsOpts.RetentionOptions(), t)
		multiErr = multiErr.Add(
			m.cleanupExpiredNamespaceDataFiles(earliest, n.GetOwnedShards()))
	}
	return multiErr.FinalError()
}
// cleanupExpiredIndexFiles removes expired index filesets (relative to t)
// for every owned namespace that has both cleanup and indexing enabled.
func (m *cleanupManager) cleanupExpiredIndexFiles(t time.Time) error {
	namespaces, err := m.database.GetOwnedNamespaces()
	if err != nil {
		return err
	}
	multiErr := xerrors.NewMultiError()
	for _, n := range namespaces {
		nsOpts := n.Options()
		if !nsOpts.CleanupEnabled() || !nsOpts.IndexOptions().Enabled() {
			continue
		}
		idx, indexErr := n.GetIndex()
		if indexErr != nil {
			multiErr = multiErr.Add(indexErr)
			continue
		}
		multiErr = multiErr.Add(idx.CleanupExpiredFileSets(t))
	}
	return multiErr.FinalError()
}
// cleanupExpiredNamespaceDataFiles asks each given shard to delete filesets
// older than earliestToRetain, accumulating any per-shard errors.
func (m *cleanupManager) cleanupExpiredNamespaceDataFiles(earliestToRetain time.Time, shards []databaseShard) error {
	multiErr := xerrors.NewMultiError()
	for _, s := range shards {
		// MultiError.Add ignores nil errors, matching how sibling cleanup
		// methods in this file accumulate results.
		multiErr = multiErr.Add(s.CleanupExpiredFileSets(earliestToRetain))
	}
	return multiErr.FinalError()
}
// The goal of the cleanupSnapshotsAndCommitlogs function is to delete all snapshots files, snapshot metadata
// files, and commitlog files except for those that are currently required for recovery from a node failure.
// According to the snapshotting / commitlog rotation logic, the files that are required for a complete
// recovery are:
//
//     1. The most recent (highest index) snapshot metadata files.
//     2. All snapshot files whose associated snapshot ID matches the snapshot ID of the most recent snapshot
//        metadata file.
//     3. All commitlog files whose index is larger than or equal to the index of the commitlog identifier stored
//        in the most recent snapshot metadata file. This is because the snapshotting and commitlog rotation process
//        guarantees that the most recent snapshot contains all data stored in commitlogs that were created before
//        the rotation / snapshot process began.
//
// cleanupSnapshotsAndCommitlogs accomplishes this goal by performing the following steps:
//
//     1. List all the snapshot metadata files on disk.
//     2. Identify the most recent one (highest index).
//     3. For every namespace/shard/block combination, delete all snapshot files that match one of the following criteria:
//         1. Snapshot files whose associated snapshot ID does not match the snapshot ID of the most recent
//            snapshot metadata file.
//         2. Snapshot files that are corrupt.
//     4. Delete all snapshot metadata files prior to the most recent one.
//     5. Delete corrupt snapshot metadata files.
//     6. List all the commitlog files on disk.
//     7. List all the commitlog files that are being actively written to.
//     8. Delete all commitlog files whose index is lower than the index of the commitlog file referenced in the
//        most recent snapshot metadata file (ignoring any commitlog files being actively written to.)
//     9. Delete all corrupt commitlog files (ignoring any commitlog files being actively written to.)
//
// This process is also modeled formally in TLA+ in the file `SnapshotsSpec.tla`.
func (m *cleanupManager) cleanupSnapshotsAndCommitlogs() (finalErr error) {
	namespaces, err := m.database.GetOwnedNamespaces()
	if err != nil {
		return err
	}

	fsOpts := m.opts.CommitLogOptions().FilesystemOptions()
	snapshotMetadatas, snapshotMetadataErrorsWithPaths, err := m.snapshotMetadataFilesFn(fsOpts)
	if err != nil {
		return err
	}

	if len(snapshotMetadatas) == 0 {
		// No cleanup can be performed until we have at least one complete snapshot.
		return nil
	}

	// They should technically already be sorted, but better to be safe.
	sort.Slice(snapshotMetadatas, func(i, j int) bool {
		return snapshotMetadatas[i].ID.Index < snapshotMetadatas[j].ID.Index
	})

	// Sanity check: metadata indexes must be unique. Because the slice is
	// sorted, any duplicates must be adjacent, so a single pass tracking the
	// previous index suffices.
	lastMetadataIndex := int64(-1)
	for _, snapshotMetadata := range snapshotMetadatas {
		currIndex := snapshotMetadata.ID.Index
		if currIndex == lastMetadataIndex {
			// Should never happen.
			return fmt.Errorf(
				"found two snapshot metadata files with duplicate index: %d", currIndex)
		}
		lastMetadataIndex = currIndex
	}

	var (
		multiErr           = xerrors.NewMultiError()
		filesToDelete      = []string{}
		mostRecentSnapshot = snapshotMetadatas[len(snapshotMetadatas)-1]
	)
	defer func() {
		// Use a defer to perform the final file deletion so that we can attempt to cleanup *some* files
		// when we encounter partial errors on a best effort basis.
		multiErr = multiErr.Add(finalErr)
		multiErr = multiErr.Add(m.deleteFilesFn(filesToDelete))
		finalErr = multiErr.FinalError()
	}()

	for _, ns := range namespaces {
		for _, s := range ns.GetOwnedShards() {
			shardSnapshots, err := m.snapshotFilesFn(fsOpts.FilePathPrefix(), ns.ID(), s.ID())
			if err != nil {
				multiErr = multiErr.Add(fmt.Errorf("err reading snapshot files for ns: %s and shard: %d, err: %v", ns.ID(), s.ID(), err))
				continue
			}

			for _, snapshot := range shardSnapshots {
				_, snapshotID, err := snapshot.SnapshotTimeAndID()
				if err != nil {
					// If we can't parse the snapshotID, assume the snapshot is corrupt and delete it. This could be caused
					// by a variety of situations, like a node crashing while writing out a set of snapshot files and should
					// have no impact on correctness as the snapshot files from previous (successful) snapshot will still be
					// retained.
					m.metrics.corruptSnapshotFile.Inc(1)
					m.opts.InstrumentOptions().Logger().WithFields(
						xlog.NewField("err", err),
						xlog.NewField("files", snapshot.AbsoluteFilepaths),
					).Errorf(
						"encountered corrupt snapshot file during cleanup, marking files for deletion")
					filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths...)
					continue
				}

				if !uuid.Equal(snapshotID, mostRecentSnapshot.ID.UUID) {
					// If the UUID of the snapshot files doesn't match the most recent snapshot
					// then its safe to delete because it means we have a more recently complete set.
					m.metrics.deletedSnapshotFile.Inc(1)
					filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths...)
				}
			}
		}
	}

	// Delete all snapshot metadatas prior to the most recent one.
	for _, snapshot := range snapshotMetadatas[:len(snapshotMetadatas)-1] {
		m.metrics.deletedSnapshotMetadataFile.Inc(1)
		filesToDelete = append(filesToDelete, snapshot.AbsoluteFilepaths()...)
	}

	// Delete corrupt snapshot metadata files.
	for _, errorWithPath := range snapshotMetadataErrorsWithPaths {
		m.metrics.corruptSnapshotMetadataFile.Inc(1)
		m.opts.InstrumentOptions().Logger().WithFields(
			xlog.NewField("err", errorWithPath.Error),
			xlog.NewField("metadataFilePath", errorWithPath.MetadataFilePath),
			xlog.NewField("checkpointFilePath", errorWithPath.CheckpointFilePath),
		).Errorf(
			"encountered corrupt snapshot metadata file during cleanup, marking files for deletion")
		filesToDelete = append(filesToDelete, errorWithPath.MetadataFilePath)
		filesToDelete = append(filesToDelete, errorWithPath.CheckpointFilePath)
	}

	// Figure out which commitlog files exist on disk.
	files, commitlogErrorsWithPaths, err := m.commitLogFilesFn(m.opts.CommitLogOptions())
	if err != nil {
		// Hard failure here because the remaining cleanup logic relies on this data
		// being available.
		return err
	}

	// Figure out which commitlog files are being actively written to.
	activeCommitlogs, err := m.activeCommitlogs.ActiveLogs()
	if err != nil {
		// Hard failure here because the remaining cleanup logic relies on this data
		// being available.
		return err
	}

	// Delete all commitlog files prior to the one captured by the most recent snapshot.
	for _, file := range files {
		if activeCommitlogs.Contains(file.FilePath) {
			// Skip over any commitlog files that are being actively written to.
			continue
		}

		if file.Index < mostRecentSnapshot.CommitlogIdentifier.Index {
			m.metrics.deletedCommitlogFile.Inc(1)
			filesToDelete = append(filesToDelete, file.FilePath)
		}
	}

	// Delete corrupt commitlog files.
	for _, errorWithPath := range commitlogErrorsWithPaths {
		if activeCommitlogs.Contains(errorWithPath.Path()) {
			// Skip over any commitlog files that are being actively written to. Note that is
			// is common for an active commitlog to appear corrupt because the info header has
			// not been flushed yet.
			continue
		}

		m.metrics.corruptCommitlogFile.Inc(1)
		// If we were unable to read the commit log files info header, then we're forced to assume
		// that the file is corrupt and remove it. This can happen in situations where M3DB experiences
		// sudden shutdown.
		m.opts.InstrumentOptions().Logger().WithFields(
			xlog.NewField("err", errorWithPath.Error()),
			xlog.NewField("path", errorWithPath.Path()),
		).Errorf(
			"encountered corrupt commitlog file during cleanup, marking file for deletion: %s",
			errorWithPath.Error())
		filesToDelete = append(filesToDelete, errorWithPath.Path())
	}

	// finalErr is nil here; the deferred function above folds in any partial
	// errors plus the result of the file deletion.
	return finalErr
}