@@ -202,8 +202,6 @@ type compaction struct {
 	comparer *base.Comparer
 	logger   Logger
 	version  *manifest.Version
-	stats    base.InternalIteratorStats
-	beganAt  time.Time
 	// versionEditApplied is set to true when a compaction has completed and the
 	// resulting version has been installed (if successful), but the compaction
 	// goroutine is still cleaning up (eg, deleting obsolete files).
@@ -254,9 +252,6 @@ type compaction struct {
 	// single output table with the tables in the grandparent level.
 	maxOverlapBytes uint64
 
-	// bytesWritten contains the number of bytes that have been written to outputs.
-	bytesWritten atomic.Int64
-
 	// The boundaries of the input data.
 	bounds base.UserKeyBounds
 
@@ -305,16 +300,35 @@ type compaction struct {
 		l0Limits [][]byte
 	}
 
-	metrics levelMetricsDelta
-
-	pickerMetrics pickedCompactionMetrics
+	// metrics encapsulates various metrics collected during a compaction.
+	metrics compactionMetrics
 
 	grantHandle CompactionGrantHandle
 
 	tableFormat   sstable.TableFormat
 	objCreateOpts objstorage.CreateOptions
 }
 
+// compactionMetrics contains metrics surrounding a compaction.
+type compactionMetrics struct {
+	// beganAt is the time when the compaction began.
+	beganAt time.Time
+	// bytesWritten contains the number of bytes that have been written to
+	// outputs. It's updated whenever the compaction outputs'
+	// objstorage.Writables receive new writes. See newCompactionOutputObj.
+	bytesWritten atomic.Int64
+	// internalIterStats contains statistics from the internal iterators used by
+	// the compaction.
+	//
+	// TODO(jackson): Use these to power the compaction BytesRead metric.
+	internalIterStats base.InternalIteratorStats
+	// perLevel contains metrics for each level involved in the compaction.
+	perLevel levelMetricsDelta
+	// picker contains metrics from the compaction picker when the compaction
+	// was picked.
+	picker pickedCompactionMetrics
+}
+
 // inputLargestSeqNumAbsolute returns the maximum LargestSeqNumAbsolute of any
 // input sstables.
 func (c *compaction) inputLargestSeqNumAbsolute() base.SeqNum {
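The hunks below both index this delta directly (`c.metrics.perLevel[level]`) and call a `perLevel.level(...)` helper that lazily allocates a level's entry. The helper's definition isn't part of this diff; a minimal sketch of what it plausibly looks like, assuming `levelMetricsDelta` is a fixed-size array of `*LevelMetrics` indexed by level (consistent with the indexing above):

```go
// Sketch only: levelMetricsDelta is defined elsewhere in this package; the
// array-of-pointers shape is an assumption.
type levelMetricsDelta [numLevels]*LevelMetrics

// level returns the metrics entry for the given level, allocating it on
// first use so call sites can bump counters without repeating nil checks.
func (m *levelMetricsDelta) level(level int) *LevelMetrics {
	if m[level] == nil {
		m[level] = &LevelMetrics{}
	}
	return m[level]
}
```

Because `level` mutates the existing entry rather than installing a fresh struct, metrics recorded earlier in the compaction (for example by `updateLevelMetricsOnExcise`) survive later updates to the same level.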
@@ -360,11 +374,11 @@ func (c *compaction) makeInfo(jobID JobID) CompactionInfo {
 		info.Output.Level = numLevels - 1
 	}
 
-	for i, score := range c.pickerMetrics.scores {
+	for i, score := range c.metrics.picker.scores {
 		info.Input[i].Score = score
 	}
-	info.SingleLevelOverlappingRatio = c.pickerMetrics.singleLevelOverlappingRatio
-	info.MultiLevelOverlappingRatio = c.pickerMetrics.multiLevelOverlappingRatio
+	info.SingleLevelOverlappingRatio = c.metrics.picker.singleLevelOverlappingRatio
+	info.MultiLevelOverlappingRatio = c.metrics.picker.multiLevelOverlappingRatio
 	if len(info.Input) > 2 {
 		info.Annotations = append(info.Annotations, "multilevel")
 	}
@@ -393,13 +407,15 @@ func newCompaction(
 		bounds:             pc.bounds,
 		logger:             opts.Logger,
 		version:            pc.version,
-		beganAt:            beganAt,
 		getValueSeparation: getValueSeparation,
 		maxOutputFileSize:  pc.maxOutputFileSize,
 		maxOverlapBytes:    pc.maxOverlapBytes,
-		pickerMetrics:      pc.pickerMetrics,
-		grantHandle:        grantHandle,
-		tableFormat:        tableFormat,
+		metrics: compactionMetrics{
+			beganAt: beganAt,
+			picker:  pc.pickerMetrics,
+		},
+		grantHandle: grantHandle,
+		tableFormat: tableFormat,
 	}
 	// Acquire a reference to the version to ensure that files and in-memory
 	// version state necessary for reading files remain available. Ignoring
@@ -542,9 +558,11 @@ func newDeleteOnlyCompaction(
 		comparer:    opts.Comparer,
 		logger:      opts.Logger,
 		version:     cur,
-		beganAt:     beganAt,
 		inputs:      inputs,
 		grantHandle: noopGrantHandle{},
+		metrics: compactionMetrics{
+			beganAt: beganAt,
+		},
 	}
 	c.deleteOnly.hints = hints
 	c.deleteOnly.exciseEnabled = exciseEnabled
@@ -664,13 +682,15 @@ func newFlush(
 		comparer:           opts.Comparer,
 		logger:             opts.Logger,
 		version:            cur,
-		beganAt:            beganAt,
 		inputs:             []compactionLevel{{level: -1}, {level: 0}},
 		getValueSeparation: getValueSeparation,
 		maxOutputFileSize:  math.MaxUint64,
 		maxOverlapBytes:    math.MaxUint64,
 		grantHandle:        noopGrantHandle{},
 		tableFormat:        tableFormat,
+		metrics: compactionMetrics{
+			beganAt: beganAt,
+		},
 	}
 	c.flush.flushables = flushing
 	c.flush.l0Limits = l0Organizer.FlushSplitKeys()
@@ -1030,7 +1050,7 @@ func (c *compaction) newInputIters(
 	// iter.
 	pointIter = iters[0]
 	if len(iters) > 1 {
-		pointIter = newMergingIter(c.logger, &c.stats, cmp, nil, iters...)
+		pointIter = newMergingIter(c.logger, &c.metrics.internalIterStats, cmp, nil, iters...)
 	}
 
 	// In normal operation, levelIter iterates over the point operations in a
@@ -1382,10 +1402,10 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
 	ingestFlushable := c.flush.flushables[0].flushable.(*ingestedFlushable)
 
 	updateLevelMetricsOnExcise := func(m *manifest.TableMetadata, level int, added []manifest.NewTableEntry) {
-		levelMetrics := c.metrics[level]
+		levelMetrics := c.metrics.perLevel[level]
 		if levelMetrics == nil {
 			levelMetrics = &LevelMetrics{}
-			c.metrics[level] = levelMetrics
+			c.metrics.perLevel[level] = levelMetrics
 		}
 		levelMetrics.TablesCount--
 		levelMetrics.TablesSize -= int64(m.Size)
@@ -1448,11 +1468,7 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
 				level: level,
 			})
 		}
-		levelMetrics := c.metrics[level]
-		if levelMetrics == nil {
-			levelMetrics = &LevelMetrics{}
-			c.metrics[level] = levelMetrics
-		}
+		levelMetrics := c.metrics.perLevel.level(level)
 		levelMetrics.TableBytesIngested += file.Size
 		levelMetrics.TablesIngested++
 	}
@@ -1628,7 +1644,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 	// oldest unflushed memtable.
 	ve.MinUnflushedLogNum = minUnflushedLogNum
 	if c.kind != compactionKindIngestedFlushable {
-		l0Metrics := c.metrics[0]
+		l0Metrics := c.metrics.perLevel.level(0)
 		if d.opts.DisableWAL {
 			// If the WAL is disabled, every flushable has a zero [logSize],
 			// resulting in zero bytes in. Instead, use the number of bytes we
@@ -1677,7 +1693,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 		return versionUpdate{
 			VE:                      ve,
 			JobID:                   jobID,
-			Metrics:                 c.metrics,
+			Metrics:                 c.metrics.perLevel,
 			InProgressCompactionsFn: func() []compactionInfo { return d.getInProgressCompactionInfoLocked(c) },
 		}, nil
 	})
@@ -1692,7 +1708,8 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 
 	d.clearCompactingState(c, err != nil)
 	delete(d.mu.compact.inProgress, c)
-	d.mu.versions.incrementCompactions(c.kind, c.extraLevels, c.pickerMetrics, c.bytesWritten.Load(), err)
+	d.mu.versions.incrementCompactions(c.kind, c.extraLevels, c.metrics.picker,
+		c.metrics.bytesWritten.Load(), err)
 
 	var flushed flushableList
 	if err == nil {
@@ -1702,7 +1719,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 		d.updateTableStatsLocked(ve.NewTables)
 		if ingest {
 			d.mu.versions.metrics.Flush.AsIngestCount++
-			for _, l := range c.metrics {
+			for _, l := range c.metrics.perLevel {
 				if l != nil {
 					d.mu.versions.metrics.Flush.AsIngestBytes += l.TableBytesIngested
 					d.mu.versions.metrics.Flush.AsIngestTableCount += l.TablesIngested
@@ -2476,7 +2493,7 @@ func (d *DB) compact(c *compaction, errChannel chan error) {
 		// must be atomic with the above removal of c from
 		// d.mu.compact.InProgress to ensure Metrics.Compact.Duration does not
 		// miss or double count a completing compaction's duration.
-		d.mu.compact.duration += d.timeNow().Sub(c.beganAt)
+		d.mu.compact.duration += d.timeNow().Sub(c.metrics.beganAt)
 	}()
 	// Done must not be called while holding any lock that needs to be
 	// acquired by Schedule. Also, it must be called after new Version has
@@ -2625,7 +2642,7 @@ func (d *DB) compact1(jobID JobID, c *compaction) (err error) {
 		return versionUpdate{
 			VE:                      ve,
 			JobID:                   jobID,
-			Metrics:                 c.metrics,
+			Metrics:                 c.metrics.perLevel,
 			InProgressCompactionsFn: func() []compactionInfo { return d.getInProgressCompactionInfoLocked(c) },
 		}, nil
 	})
@@ -2646,10 +2663,11 @@ func (d *DB) compact1(jobID JobID, c *compaction) (err error) {
 	// NB: clearing compacting state must occur before updating the read state;
 	// L0Sublevels initialization depends on it.
 	d.clearCompactingState(c, err != nil)
-	d.mu.versions.incrementCompactions(c.kind, c.extraLevels, c.pickerMetrics, c.bytesWritten.Load(), err)
-	d.mu.versions.incrementCompactionBytes(-c.bytesWritten.Load())
+	d.mu.versions.incrementCompactions(c.kind, c.extraLevels, c.metrics.picker,
+		c.metrics.bytesWritten.Load(), err)
+	d.mu.versions.incrementCompactionBytes(-c.metrics.bytesWritten.Load())
 
-	info.TotalDuration = d.timeNow().Sub(c.beganAt)
+	info.TotalDuration = d.timeNow().Sub(c.metrics.beganAt)
 	d.opts.EventListener.CompactionEnd(info)
 
 	// Update the read state before deleting obsolete files because the
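Note the pairing in the hunk above: `incrementCompactionBytes` receives positive per-write deltas while the compaction runs (wired up in `newCompactionOutputObj`, final hunk below) and the negated total here, so the in-progress gauge nets back to zero when the compaction completes. A rough sketch of the assumed accounting; the gauge field name is hypothetical:

```go
// Sketch only: the real method lives on versionSet in this package; the
// inProgressBytes field is a hypothetical atomic.Int64 gauge.
func (vs *versionSet) incrementCompactionBytes(numBytes int64) {
	// Positive deltas arrive per write; the compaction's full total is
	// subtracted on completion, returning the gauge to zero.
	vs.inProgressBytes.Add(numBytes)
}
```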
@@ -2813,9 +2831,8 @@ func (d *DB) runCopyCompaction(
 	if errors.Is(err, sstable.ErrEmptySpan) {
 		// The virtual table was empty. Just remove the backing file.
 		// Note that deleteOnExit is true so we will delete the created object.
-		c.metrics[c.outputLevel.level] = &LevelMetrics{
-			TableBytesIn: inputMeta.Size,
-		}
+		outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
+		outputMetrics.TableBytesIn = inputMeta.Size
 
 		return ve, compact.Stats{}, nil
 	}
@@ -2839,11 +2856,10 @@ func (d *DB) runCopyCompaction(
 	if newMeta.Virtual {
 		ve.CreatedBackingTables = []*manifest.TableBacking{newMeta.TableBacking}
 	}
-	c.metrics[c.outputLevel.level] = &LevelMetrics{
-		TableBytesIn:        inputMeta.Size,
-		TableBytesCompacted: newMeta.Size,
-		TablesCompacted:     1,
-	}
+	outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
+	outputMetrics.TableBytesIn = inputMeta.Size
+	outputMetrics.TableBytesCompacted = newMeta.Size
+	outputMetrics.TablesCompacted = 1
 
 	if err := d.objProvider.Sync(); err != nil {
 		return nil, compact.Stats{}, err
@@ -3022,12 +3038,11 @@ func (d *DB) runDeleteOnlyCompaction(
 		DeletedTables: map[manifest.DeletedTableEntry]*manifest.TableMetadata{},
 	}
 	for _, cl := range c.inputs {
-		levelMetrics := &LevelMetrics{}
+		levelMetrics := c.metrics.perLevel.level(cl.level)
 		err := d.runDeleteOnlyCompactionForLevel(cl, levelMetrics, ve, snapshots, fragments, c.deleteOnly.exciseEnabled)
 		if err != nil {
 			return nil, stats, err
 		}
-		c.metrics[cl.level] = levelMetrics
 	}
 	// Remove any files that were added and deleted in the same versionEdit.
 	ve.NewTables = slices.DeleteFunc(ve.NewTables, func(e manifest.NewTableEntry) bool {
@@ -3067,10 +3082,9 @@ func (d *DB) runMoveCompaction(
 	if c.cancel.Load() {
 		return ve, stats, ErrCancelledCompaction
 	}
-	c.metrics[c.outputLevel.level] = &LevelMetrics{
-		TableBytesMoved: meta.Size,
-		TablesMoved:     1,
-	}
+	outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
+	outputMetrics.TableBytesMoved = meta.Size
+	outputMetrics.TablesMoved = 1
 	ve = &manifest.VersionEdit{
 		DeletedTables: map[manifest.DeletedTableEntry]*manifest.TableMetadata{
 			{Level: c.startLevel.level, FileNum: meta.TableNum}: meta,
@@ -3200,7 +3214,7 @@ func (d *DB) compactAndWrite(
 	defer c.bufferPool.Release()
 	blockReadEnv := block.ReadEnv{
 		BufferPool: &c.bufferPool,
-		Stats:      &c.stats,
+		Stats:      &c.metrics.internalIterStats,
 		IterStats: d.fileCache.SSTStatsCollector().Accumulator(
 			uint64(uintptr(unsafe.Pointer(c))),
 			categoryCompaction,
@@ -3333,13 +3347,13 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*manifest.VersionEd
 	}
 
 	startLevelBytes := c.startLevel.files.TableSizeSum()
-	outputMetrics := &LevelMetrics{
-		TableBytesIn: startLevelBytes,
-		// TODO(jackson): This BytesRead value does not include any blob files
-		// written. It either should, or we should add a separate metric.
-		TableBytesRead:     c.outputLevel.files.TableSizeSum(),
-		BlobBytesCompacted: result.Stats.CumulativeBlobFileSize,
-	}
+
+	outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
+	outputMetrics.TableBytesIn = startLevelBytes
+	// TODO(jackson): This BytesRead value does not include any blob files
+	// written. It either should, or we should add a separate metric.
+	outputMetrics.TableBytesRead = c.outputLevel.files.TableSizeSum()
+	outputMetrics.BlobBytesCompacted = result.Stats.CumulativeBlobFileSize
 	if c.flush.flushables != nil {
 		outputMetrics.BlobBytesFlushed = result.Stats.CumulativeBlobFileSize
 	}
@@ -3348,12 +3362,11 @@
 	}
 	outputMetrics.TableBytesRead += outputMetrics.TableBytesIn
 
-	c.metrics[c.outputLevel.level] = outputMetrics
-	if len(c.flush.flushables) == 0 && c.metrics[c.startLevel.level] == nil {
-		c.metrics[c.startLevel.level] = &LevelMetrics{}
+	if len(c.flush.flushables) == 0 {
+		c.metrics.perLevel.level(c.startLevel.level)
 	}
 	if len(c.extraLevels) > 0 {
-		c.metrics[c.extraLevels[0].level] = &LevelMetrics{}
+		c.metrics.perLevel.level(c.extraLevels[0].level)
 		outputMetrics.MultiLevel.TableBytesInTop = startLevelBytes
 		outputMetrics.MultiLevel.TableBytesIn = outputMetrics.TableBytesIn
 		outputMetrics.MultiLevel.TableBytesRead = outputMetrics.TableBytesRead
@@ -3515,7 +3528,7 @@ func (d *DB) newCompactionOutputObj(
 		writable = &compactionWritable{
 			Writable: writable,
 			versions: d.mu.versions,
-			written:  &c.bytesWritten,
+			written:  &c.metrics.bytesWritten,
 		}
 	}
 	return writable, objMeta, nil
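The `written` pointer wired up above is what keeps `c.metrics.bytesWritten` current as output blocks are written. A minimal sketch of how such a wrapper could account for writes, assuming `compactionWritable` embeds `objstorage.Writable` (its real definition lives elsewhere in the package):

```go
// Sketch only: compactionWritable is defined elsewhere in this package.
type compactionWritable struct {
	objstorage.Writable
	versions *versionSet
	written  *atomic.Int64
}

// Write forwards to the underlying Writable and, on success, credits the
// bytes to the compaction's metrics and the in-progress compaction gauge.
func (c *compactionWritable) Write(p []byte) error {
	if err := c.Writable.Write(p); err != nil {
		return err
	}
	c.written.Add(int64(len(p)))
	c.versions.incrementCompactionBytes(int64(len(p)))
	return nil
}
```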