Commit 9f32f9a

metrics: use CountAndSize in LevelMetrics
1 parent b39fdcf commit 9f32f9a

12 files changed, +124 −151 lines changed
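
This page does not show the definition of the new metrics.CountAndSize helper that the commit introduces. As a rough sketch inferred from the call sites in the diffs below (Inc, Accumulate, Count, Bytes), it presumably looks something like the following; the actual definition in the pebble metrics package may differ.

// Hypothetical sketch of metrics.CountAndSize, inferred from the call sites
// in this commit; not the actual pebble definition.
package metrics

// CountAndSize pairs a count of objects (here, sstables) with their
// cumulative size in bytes.
type CountAndSize struct {
	Count uint64
	Bytes uint64
}

// Inc records one additional object of the given size.
func (c *CountAndSize) Inc(bytes uint64) {
	c.Count++
	c.Bytes += bytes
}

// Accumulate adds the counts and bytes of other into c.
func (c *CountAndSize) Accumulate(other CountAndSize) {
	c.Count += other.Count
	c.Bytes += other.Bytes
}

With this helper, each pair of sibling fields (a count plus a bytes total) collapses into a single field, and the paired increments at call sites collapse into one Inc or Accumulate call.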

cmd/pebble/write_bench.go

Lines changed: 1 addition & 1 deletion
@@ -293,7 +293,7 @@ func runWriteBenchmark(_ *cobra.Command, args []string) error {
 	}
 
 	// Print the current stats.
-	l0Files := m.Levels[0].TablesCount
+	l0Files := m.Levels[0].Tables.Count
 	l0Sublevels := m.Levels[0].Sublevels
 	nLevels := 0
 	for _, l := range m.Levels {

cmd/pebble/ycsb.go

Lines changed: 1 addition & 1 deletion
@@ -622,7 +622,7 @@ func (y *ycsb) done(elapsed time.Duration) {
 		resultHist.TotalCount(),
 		float64(resultHist.TotalCount())/elapsed.Seconds(),
 		total.TableBytesRead,
-		total.TableBytesFlushed+total.TableBytesCompacted+total.BlobBytesFlushed+total.BlobBytesCompacted,
+		total.TablesFlushed.Bytes+total.TablesCompacted.Bytes+total.BlobBytesFlushed+total.BlobBytesCompacted,
 		float64(readAmpSum)/float64(readAmpCount),
 		total.WriteAmp(),
 	)

compaction.go

Lines changed: 8 additions & 13 deletions
@@ -1572,8 +1572,7 @@ func (d *DB) runIngestFlush(c *tableCompaction) (*manifest.VersionEdit, error) {
 			})
 		}
 		levelMetrics := c.metrics.perLevel.level(level)
-		levelMetrics.TableBytesIngested += file.Size
-		levelMetrics.TablesIngested++
+		levelMetrics.TablesIngested.Inc(file.Size)
 	}
 	if ingestFlushable.exciseSpan.Valid() {
 		exciseBounds := ingestFlushable.exciseSpan.UserKeyBounds()
@@ -1779,7 +1778,7 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 		// resulting in zero bytes in. Instead, use the number of bytes we
 		// flushed as the BytesIn. This ensures we get a reasonable w-amp
 		// calculation even when the WAL is disabled.
-		l0Metrics.TableBytesIn = l0Metrics.TableBytesFlushed + l0Metrics.BlobBytesFlushed
+		l0Metrics.TableBytesIn = l0Metrics.TablesFlushed.Bytes + l0Metrics.BlobBytesFlushed
 	} else {
 		for i := 0; i < n; i++ {
 			l0Metrics.TableBytesIn += d.mu.mem.queue[i].logSize
@@ -1853,8 +1852,8 @@ func (d *DB) flush1() (bytesFlushed uint64, err error) {
 		d.mu.versions.metrics.Flush.AsIngestCount++
 		for _, l := range c.metrics.perLevel {
 			if l != nil {
-				d.mu.versions.metrics.Flush.AsIngestBytes += l.TableBytesIngested
-				d.mu.versions.metrics.Flush.AsIngestTableCount += l.TablesIngested
+				d.mu.versions.metrics.Flush.AsIngestBytes += l.TablesIngested.Bytes
+				d.mu.versions.metrics.Flush.AsIngestTableCount += l.TablesIngested.Count
 			}
 		}
 	}
@@ -2970,8 +2969,7 @@ func (d *DB) runCopyCompaction(
 	}
 	outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
 	outputMetrics.TableBytesIn = inputMeta.Size
-	outputMetrics.TableBytesCompacted = newMeta.Size
-	outputMetrics.TablesCompacted = 1
+	outputMetrics.TablesCompacted.Inc(newMeta.Size)
 
 	if err := d.objProvider.Sync(); err != nil {
 		return nil, compact.Stats{}, []compact.OutputBlob{}, err
@@ -3218,8 +3216,7 @@ func (d *DB) runMoveCompaction(
 		return ve, stats, blobs, ErrCancelledCompaction
 	}
 	outputMetrics := c.metrics.perLevel.level(c.outputLevel.level)
-	outputMetrics.TableBytesMoved = meta.Size
-	outputMetrics.TablesMoved = 1
+	outputMetrics.TablesMoved.Inc(meta.Size)
 	ve = &manifest.VersionEdit{
 		DeletedTables: map[manifest.DeletedTableEntry]*manifest.TableMetadata{
 			{Level: c.startLevel.level, FileNum: meta.TableNum}: meta,
@@ -3592,11 +3589,9 @@ func (c *tableCompaction) makeVersionEdit(result compact.Result) (*manifest.Vers
 		}
 		// Update metrics.
 		if c.flush.flushables == nil {
-			outputMetrics.TablesCompacted++
-			outputMetrics.TableBytesCompacted += fileMeta.Size
+			outputMetrics.TablesCompacted.Inc(fileMeta.Size)
 		} else {
-			outputMetrics.TablesFlushed++
-			outputMetrics.TableBytesFlushed += fileMeta.Size
+			outputMetrics.TablesFlushed.Inc(fileMeta.Size)
 		}
 		outputMetrics.Additional.BytesWrittenDataBlocks += t.WriterMeta.Properties.DataSize
 		outputMetrics.Additional.BytesWrittenValueBlocks += t.WriterMeta.Properties.ValueBlocksSize

compaction_picker_test.go

Lines changed: 1 addition & 1 deletion
@@ -1621,7 +1621,7 @@ func TestCompactionPickerScores(t *testing.T) {
 	tw := tabwriter.NewWriter(&buf, 2, 1, 4, ' ', 0)
 	fmt.Fprintf(tw, "Level\tSize\tScore\tFill factor\tCompensated fill factor\n")
 	for l, lm := range d.Metrics().Levels {
-		fmt.Fprintf(tw, "L%d\t%s\t%.2f\t%.2f\t%.2f\n", l, humanize.Bytes.Int64(lm.AggregateSize()).String(),
+		fmt.Fprintf(tw, "L%d\t%s\t%.2f\t%.2f\t%.2f\n", l, humanize.Bytes.Uint64(lm.AggregateSize()).String(),
 			lm.Score, lm.FillFactor, lm.CompensatedFillFactor)
 	}
 	tw.Flush()

file_cache_test.go

Lines changed: 2 additions & 2 deletions
@@ -329,7 +329,7 @@ func TestVirtualReadsWiring(t *testing.T) {
 	require.NoError(t, d.Apply(b, nil))
 	require.NoError(t, d.Flush())
 	require.NoError(t, d.Compact(context.Background(), []byte{'a'}, []byte{'b'}, false))
-	require.Equal(t, 1, int(d.Metrics().Levels[6].TablesCount))
+	require.Equal(t, 1, int(d.Metrics().Levels[6].Tables.Count))
 
 	d.mu.Lock()
 
@@ -426,7 +426,7 @@ func TestVirtualReadsWiring(t *testing.T) {
 	d.mu.Unlock()
 
 	// Confirm that there were only 2 virtual sstables in L6.
-	require.Equal(t, 2, int(d.Metrics().Levels[6].TablesCount))
+	require.Equal(t, 2, int(d.Metrics().Levels[6].Tables.Count))
 
 	// These reads will go through the file cache.
 	iter, _ := d.NewIter(nil)

ingest.go

Lines changed: 1 addition & 2 deletions
@@ -2112,8 +2112,7 @@ func (d *DB) ingestApply(
 		}
 		f.Meta = m
 		levelMetrics := metrics.level(f.Level)
-		levelMetrics.TableBytesIngested += m.Size
-		levelMetrics.TablesIngested++
+		levelMetrics.TablesIngested.Inc(m.Size)
 	}
 	// replacedTables maps files excised due to exciseSpan (or splitFiles returned
 	// by ingestTargetLevel), to files that were created to replace it. This map

iterator_test.go

Lines changed: 2 additions & 2 deletions
@@ -2781,7 +2781,7 @@ func BenchmarkSeekPrefixTombstones(b *testing.B) {
 	}
 
 	d.mu.Lock()
-	require.Equal(b, int64(ks.Count()-1), d.mu.versions.metrics.Levels[numLevels-1].TablesCount)
+	require.Equal(b, int64(ks.Count()-1), d.mu.versions.metrics.Levels[numLevels-1].Tables.Count)
 	d.mu.Unlock()
 
 	seekKey := testkeys.Key(ks, 1)
@@ -3070,7 +3070,7 @@ func runBenchmarkQueueWorkload(b *testing.B, deleteRatio float32, initOps int, v
 	for i := 0; i < numLevels; i++ {
 		numTombstones := stats.Levels[i].KindsCount[base.InternalKeyKindDelete]
 		numSets := stats.Levels[i].KindsCount[base.InternalKeyKindSet]
-		numTables := metrics.Levels[i].TablesCount
+		numTables := metrics.Levels[i].Tables.Count
 		if numSets > 0 {
 			b.Logf("L%d: %d tombstones, %d sets, %d sstables\n", i, numTombstones, numSets, numTables)
 		}

metrics.go

Lines changed: 55 additions & 72 deletions
@@ -21,6 +21,7 @@ import (
 	"github.com/cockroachdb/pebble/internal/deletepacer"
 	"github.com/cockroachdb/pebble/internal/manifest"
 	"github.com/cockroachdb/pebble/internal/manual"
+	"github.com/cockroachdb/pebble/metrics"
 	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
 	"github.com/cockroachdb/pebble/record"
 	"github.com/cockroachdb/pebble/sstable"
@@ -55,16 +56,16 @@ type LevelMetrics struct {
 	// sublevel count of 0, implying no read amplification. Only L0 will have
 	// a sublevel count other than 0 or 1.
 	Sublevels int32
-	// The total count of sstables in the level.
-	TablesCount int64
-	// The total size in bytes of the sstables in the level. Note that if tables
-	// contain references to blob files, this quantity does not include the the
-	// size of the blob files or the referenced values.
-	TablesSize int64
-	// The total number of virtual sstables in the level.
-	VirtualTablesCount uint64
-	// The total size of the virtual sstables in the level.
-	VirtualTablesSize uint64
+
+	// The total count and size of sstables in the level.
+	//
+	// Note that if tables contain references to blob files, the size does
+	// not include the size of the blob files or the referenced values.
+	Tables metrics.CountAndSize
+
+	// The total count and size of virtual sstables in the level.
+	VirtualTables metrics.CountAndSize
+
 	// The estimated total physical size of all blob references across all
 	// sstables in the level. The physical size is estimated based on the size
 	// of referenced values and the values' blob file's compression ratios.
@@ -77,35 +78,24 @@ type LevelMetrics struct {
 	FillFactor float64
 	// The level's compensated fill factor. See candidateLevelInfo.
 	CompensatedFillFactor float64
+
+	// The count and total size of sstable tables ingested into this level.
+	TablesIngested metrics.CountAndSize
+	// The count and total size of sstable tables compacted to this level.
+	TablesCompacted metrics.CountAndSize
+	// The count and total size of sstables flushed to this level.
+	TablesFlushed metrics.CountAndSize
+	// The count and total size of sstables moved to this level by a "move"
+	// compaction.
+	TablesMoved metrics.CountAndSize
+
 	// The number of incoming bytes from other levels' sstables read during
 	// compactions. This excludes bytes moved and bytes ingested. For L0 this is
 	// the bytes written to the WAL.
 	TableBytesIn uint64
-	// The number of sstable bytes ingested. The sibling metric for tables is
-	// TablesIngested.
-	TableBytesIngested uint64
-	// The number of sstable bytes moved into the level by a "move" compaction.
-	// The sibling metric for tables is TablesMoved.
-	TableBytesMoved uint64
 	// The number of bytes read for compactions at the level. This includes bytes
 	// read from other levels (BytesIn), as well as bytes read for the level.
 	TableBytesRead uint64
-	// The number of bytes written to sstables during compactions. The sibling
-	// metric for tables is TablesCompacted. This metric may be summed with
-	// BytesFlushed to compute the total bytes written for the level.
-	TableBytesCompacted uint64
-	// The number of bytes written to sstables during flushes. The sibling
-	// metrics for tables is TablesFlushed. This metric is always zero for all
-	// levels other than L0.
-	TableBytesFlushed uint64
-	// The number of sstables compacted to this level.
-	TablesCompacted uint64
-	// The number of sstables flushed to this level.
-	TablesFlushed uint64
-	// The number of sstables ingested into the level.
-	TablesIngested uint64
-	// The number of sstables moved to this level by a "move" compaction.
-	TablesMoved uint64
 	// The number of sstables deleted in a level by a delete-only compaction.
 	TablesDeleted uint64
 	// The number of sstables excised in a level by a delete-only compaction.
@@ -152,28 +142,22 @@ type LevelMetrics struct {
 // is exactly known. Virtual sstables' sizes are estimated, and the size of
 // values stored in blob files is estimated based on the volume of referenced
 // data and the blob file's compression ratio.
-func (m *LevelMetrics) AggregateSize() int64 {
-	return m.TablesSize + int64(m.EstimatedReferencesSize)
+func (m *LevelMetrics) AggregateSize() uint64 {
+	return m.Tables.Bytes + m.EstimatedReferencesSize
 }
 
 // Add updates the counter metrics for the level.
 func (m *LevelMetrics) Add(u *LevelMetrics) {
 	m.Sublevels += u.Sublevels
-	m.TablesCount += u.TablesCount
-	m.TablesSize += u.TablesSize
-	m.VirtualTablesCount += u.VirtualTablesCount
-	m.VirtualTablesSize += u.VirtualTablesSize
+	m.Tables.Accumulate(u.Tables)
+	m.VirtualTables.Accumulate(u.VirtualTables)
 	m.EstimatedReferencesSize += u.EstimatedReferencesSize
+	m.TablesIngested.Accumulate(u.TablesIngested)
+	m.TablesCompacted.Accumulate(u.TablesCompacted)
+	m.TablesFlushed.Accumulate(u.TablesFlushed)
+	m.TablesMoved.Accumulate(u.TablesMoved)
 	m.TableBytesIn += u.TableBytesIn
-	m.TableBytesIngested += u.TableBytesIngested
-	m.TableBytesMoved += u.TableBytesMoved
 	m.TableBytesRead += u.TableBytesRead
-	m.TableBytesCompacted += u.TableBytesCompacted
-	m.TableBytesFlushed += u.TableBytesFlushed
-	m.TablesCompacted += u.TablesCompacted
-	m.TablesFlushed += u.TablesFlushed
-	m.TablesIngested += u.TablesIngested
-	m.TablesMoved += u.TablesMoved
 	m.BlobBytesCompacted += u.BlobBytesCompacted
 	m.BlobBytesFlushed += u.BlobBytesFlushed
 	m.BlobBytesRead += u.BlobBytesRead
@@ -200,7 +184,7 @@ func (m *LevelMetrics) WriteAmp() float64 {
 	if m.TableBytesIn == 0 {
 		return 0
 	}
-	return float64(m.TableBytesFlushed+m.TableBytesCompacted+m.BlobBytesFlushed+m.BlobBytesCompacted) /
+	return float64(m.TablesFlushed.Bytes+m.TablesCompacted.Bytes+m.BlobBytesFlushed+m.BlobBytesCompacted) /
 		float64(m.TableBytesIn)
 }
 
@@ -586,7 +570,7 @@ func (m *Metrics) DiskSpaceUsage() uint64 {
 func (m *Metrics) NumVirtual() uint64 {
 	var n uint64
 	for _, level := range m.Levels {
-		n += level.VirtualTablesCount
+		n += level.VirtualTables.Count
 	}
 	return n
 }
@@ -597,7 +581,7 @@ func (m *Metrics) NumVirtual() uint64 {
 func (m *Metrics) VirtualSize() uint64 {
 	var size uint64
 	for _, level := range m.Levels {
-		size += level.VirtualTablesSize
+		size += level.VirtualTables.Bytes
 	}
 	return size
 }
@@ -621,32 +605,31 @@ func (m *Metrics) Total() LevelMetrics {
 		total.Add(l)
 	}
 	// Compute total bytes-in as the bytes written to the WAL + bytes ingested.
-	total.TableBytesIn = m.WAL.BytesWritten + total.TableBytesIngested
+	total.TableBytesIn = m.WAL.BytesWritten + total.TablesIngested.Bytes
 	// Add the total bytes-in to the total bytes-flushed. This is to account for
 	// the bytes written to the log and bytes written externally and then
 	// ingested.
-	total.TableBytesFlushed += total.TableBytesIn
+	total.TablesFlushed.Bytes += total.TableBytesIn
 	return total
 }
 
 // RemoteTablesTotal returns the total number of remote tables and their total
 // size. Remote tables are computed as the difference between total tables
 // (live + obsolete + zombie) and local tables.
-func (m *Metrics) RemoteTablesTotal() (count uint64, size uint64) {
-	var liveTables, liveTableBytes int64
+func (m *Metrics) RemoteTablesTotal() metrics.CountAndSize {
+	var liveTables metrics.CountAndSize
 	for level := 0; level < numLevels; level++ {
-		liveTables += m.Levels[level].TablesCount
-		liveTableBytes += m.Levels[level].TablesSize
+		liveTables.Accumulate(m.Levels[level].Tables)
 	}
-	totalCount := liveTables + m.Table.ObsoleteCount + m.Table.ZombieCount
+	totalCount := int64(liveTables.Count) + m.Table.ObsoleteCount + m.Table.ZombieCount
 	localCount := m.Table.Local.LiveCount + m.Table.Local.ObsoleteCount + m.Table.Local.ZombieCount
 	remoteCount := uint64(totalCount) - localCount
 
-	totalSize := uint64(liveTableBytes) + m.Table.ObsoleteSize + m.Table.ZombieSize
+	totalSize := uint64(liveTables.Bytes) + m.Table.ObsoleteSize + m.Table.ZombieSize
 	localSize := m.Table.Local.LiveSize + m.Table.Local.ObsoleteSize + m.Table.Local.ZombieSize
 	remoteSize := totalSize - localSize
 
-	return remoteCount, remoteSize
+	return metrics.CountAndSize{Count: remoteCount, Bytes: remoteSize}
 }
 
 // Assert that Metrics implements redact.SafeFormatter.
@@ -667,27 +650,27 @@ var (
 			}
 			return fmt.Sprintf("L%d", tupleIndex)
 		}),
-		table.Bytes("size", 10, table.AlignRight, func(m *LevelMetrics) uint64 { return uint64(m.TablesSize) + m.EstimatedReferencesSize }),
+		table.Bytes("size", 10, table.AlignRight, func(m *LevelMetrics) uint64 { return uint64(m.Tables.Bytes) + m.EstimatedReferencesSize }),
 		table.Div(),
-		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) int64 { return m.TablesCount }),
-		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) int64 { return m.TablesSize }),
+		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.Tables.Count }),
+		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.Tables.Bytes }),
 		table.Div(),
-		table.Count("count", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTablesCount }),
-		table.Count("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTablesSize }),
+		table.Count("count", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTables.Count }),
+		table.Count("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.VirtualTables.Bytes }),
 		table.Div(),
 		table.Bytes("refsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.EstimatedReferencesSize }),
 		table.Bytes("valblk", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.Additional.ValueBlocksSize }),
 		table.Div(),
 		table.Bytes("in", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesIn }),
 		table.Div(),
-		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesIngested }),
-		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesIngested }),
+		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesIngested.Count }),
+		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesIngested.Bytes }),
 		table.Div(),
 		table.Int("r", 3, table.AlignRight, func(m *LevelMetrics) int { return int(m.Sublevels) }),
 		table.Float("w", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.WriteAmp() }),
 	)
 	def.FilterFn = func(tupleIndex int, m *LevelMetrics) (passed bool) {
-		return m.TablesCount != 0 || m.VirtualTablesCount != 0 || m.TableBytesIn != 0 || m.TablesIngested != 0
+		return m.Tables.Count != 0 || m.VirtualTables.Count != 0 || m.TableBytesIn != 0 || m.TablesIngested.Count != 0
 	}
 	return def
 }()
@@ -705,8 +688,8 @@ var (
 		table.Float("ff", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.FillFactor }),
 		table.Float("cff", 5, table.AlignRight, func(m *LevelMetrics) float64 { return m.CompensatedFillFactor }),
 		table.Div(),
-		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesMoved }),
-		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesMoved }),
+		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesMoved.Count }),
+		table.Bytes("size", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesMoved.Bytes }),
 		table.Div(),
 		table.Bytes("top", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.MultiLevel.TableBytesInTop }),
 		table.Bytes("in", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.MultiLevel.TableBytesIn }),
@@ -715,14 +698,14 @@ var (
 		table.Bytes("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesRead }),
 		table.Bytes("blob", 5, table.AlignRight, func(m *LevelMetrics) uint64 { return m.BlobBytesRead }),
 		table.Div(),
-		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesFlushed + m.TablesCompacted }),
-		table.Bytes("sstsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TableBytesFlushed + m.TableBytesCompacted }),
+		table.Count("tables", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesFlushed.Count + m.TablesCompacted.Count }),
+		table.Bytes("sstsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.TablesFlushed.Bytes + m.TablesCompacted.Bytes }),
 		table.Bytes("blobsz", 6, table.AlignRight, func(m *LevelMetrics) uint64 { return m.BlobBytesFlushed + m.BlobBytesCompacted }),
 	)
 	def.FilterFn = func(tupleIndex int, m *LevelMetrics) (passed bool) {
-		return !math.IsNaN(m.Score) || m.FillFactor != 0 || m.TablesMoved != 0 || m.MultiLevel.TableBytesInTop != 0 ||
+		return !math.IsNaN(m.Score) || m.FillFactor != 0 || m.TablesMoved.Count != 0 || m.MultiLevel.TableBytesInTop != 0 ||
 			m.MultiLevel.TableBytesIn != 0 || m.MultiLevel.TableBytesRead != 0 || m.BlobBytesRead != 0 ||
-			m.TablesFlushed != 0 || m.TablesCompacted != 0 || m.BlobBytesFlushed != 0 || m.BlobBytesCompacted != 0
+			m.TablesFlushed.Count != 0 || m.TablesCompacted.Count != 0 || m.BlobBytesFlushed != 0 || m.BlobBytesCompacted != 0
 	}
 	return def
 }()