Skip to content

Commit 12f4568

Browse files
committed
metrics: use compact humanization
Use the `Compact` and `OmitI` formatter flags, since we have to fit many of these metrics on one line. The output was compact historically; that was lost when some call sites were switched to `crhumanize`.
1 parent 78a4316 commit 12f4568

File tree

8 files changed

+451
-443
lines changed

8 files changed

+451
-443
lines changed

internal/ascii/table/table.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -305,7 +305,7 @@ func Count[T any, N constraints.Integer](
305305
) Field[T] {
306306
spec := widthStr(width, align) + "s"
307307
return makeFuncField(header, width, align, func(ctx RenderContext[T], tuple T) {
308-
ctx.PaddedPos(width).Printf(spec, crhumanize.Count(fn(tuple)))
308+
ctx.PaddedPos(width).Printf(spec, crhumanize.Count(fn(tuple), crhumanize.Compact, crhumanize.OmitI))
309309
})
310310
}
311311

@@ -314,7 +314,7 @@ func Bytes[T any, N constraints.Integer](
314314
) Field[T] {
315315
spec := widthStr(width, align) + "s"
316316
return makeFuncField(header, width, align, func(ctx RenderContext[T], tuple T) {
317-
ctx.PaddedPos(width).Printf(spec, crhumanize.Bytes(fn(tuple)))
317+
ctx.PaddedPos(width).Printf(spec, crhumanize.Bytes(fn(tuple), crhumanize.Compact, crhumanize.OmitI))
318318
})
319319
}
320320

metrics.go

Lines changed: 44 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,11 @@ import (
1212
"time"
1313
"unsafe"
1414

15+
"github.com/cockroachdb/crlib/crhumanize"
1516
"github.com/cockroachdb/pebble/internal/ascii"
1617
"github.com/cockroachdb/pebble/internal/ascii/table"
1718
"github.com/cockroachdb/pebble/internal/base"
1819
"github.com/cockroachdb/pebble/internal/cache"
19-
"github.com/cockroachdb/pebble/internal/humanize"
2020
"github.com/cockroachdb/pebble/internal/manifest"
2121
"github.com/cockroachdb/pebble/internal/manual"
2222
"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
@@ -911,16 +911,16 @@ func (m *Metrics) String() string {
911911

912912
commitPipelineInfoContents := commitPipelineInfo{
913913
// wals.
914-
files: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.WAL.Files), humanize.Bytes.Uint64(m.WAL.Size)),
915-
written: fmt.Sprintf("%s: %s", humanize.Bytes.Uint64(m.WAL.BytesIn), humanize.Bytes.Uint64(m.WAL.BytesWritten)),
914+
files: fmt.Sprintf("%s (%s)", humanizeCount(m.WAL.Files), humanizeBytes(m.WAL.Size)),
915+
written: fmt.Sprintf("%s: %s", humanizeBytes(m.WAL.BytesIn), humanizeBytes(m.WAL.BytesWritten)),
916916
overhead: fmt.Sprintf("%.1f%%", percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))),
917917
// memtables.
918-
flushes: humanize.Count.Int64(m.Flush.Count).String(),
919-
live: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.MemTable.Count), humanize.Bytes.Uint64(m.MemTable.Size)),
920-
zombie: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.MemTable.ZombieCount), humanize.Bytes.Uint64(m.MemTable.ZombieSize)),
918+
flushes: crhumanize.Count(m.Flush.Count).String(),
919+
live: fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.Count), humanizeBytes(m.MemTable.Size)),
920+
zombie: fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.ZombieCount), humanizeBytes(m.MemTable.ZombieSize)),
921921
// ingestions.
922-
total: humanize.Count.Uint64(m.WAL.BytesIn + m.WAL.BytesWritten).String(),
923-
flushable: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.Flush.AsIngestCount), humanize.Bytes.Uint64(m.Flush.AsIngestBytes)),
922+
total: crhumanize.Count(m.WAL.BytesIn + m.WAL.BytesWritten).String(),
923+
flushable: fmt.Sprintf("%s (%s)", humanizeCount(m.Flush.AsIngestCount), humanizeBytes(m.Flush.AsIngestBytes)),
924924
}
925925
commitPipelineInfoIter := func(yield func(commitPipelineInfo) bool) {
926926
yield(commitPipelineInfoContents)
@@ -932,13 +932,13 @@ func (m *Metrics) String() string {
932932
}, max(commitPipelineInfoTable.CumulativeFieldWidth, cur.Column()), -3)
933933

934934
iteratorInfoContents := iteratorInfo{
935-
bcEntries: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.BlockCache.Count), humanize.Bytes.Int64(m.BlockCache.Size)),
935+
bcEntries: fmt.Sprintf("%s (%s)", humanizeCount(m.BlockCache.Count), humanizeBytes(m.BlockCache.Size)),
936936
bcHitRate: fmt.Sprintf("%.1f%%", hitRate(m.BlockCache.Hits, m.BlockCache.Misses)),
937-
fcEntries: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.FileCache.TableCount), humanize.Bytes.Int64(m.FileCache.Size)),
937+
fcEntries: fmt.Sprintf("%s (%s)", humanizeCount(m.FileCache.TableCount), humanizeBytes(m.FileCache.Size)),
938938
fcHitRate: fmt.Sprintf("%.1f%%", hitRate(m.FileCache.Hits, m.FileCache.Misses)),
939939
bloomFilterUtil: fmt.Sprintf("%.1f%%", hitRate(m.Filter.Hits, m.Filter.Misses)),
940-
sstableItersOpen: humanize.Count.Int64(m.TableIters).String(),
941-
snapshotsOpen: humanize.Count.Uint64(uint64(m.Snapshots.Count)).String(),
940+
sstableItersOpen: humanizeCount(m.TableIters).String(),
941+
snapshotsOpen: humanizeCount(m.Snapshots.Count).String(),
942942
}
943943
iteratorInfoIter := func(yield func(iteratorInfo) bool) {
944944
yield(iteratorInfoContents)
@@ -949,23 +949,23 @@ func (m *Metrics) String() string {
949949
return iteratorInfoTable.Render(cur, table.RenderOptions{}, iteratorInfoIter)
950950
}, max(iteratorInfoTable.CumulativeFieldWidth, cur.Column()), -3)
951951

952-
status := fmt.Sprintf("%s pending", humanize.Count.Int64(m.Table.PendingStatsCollectionCount))
952+
status := fmt.Sprintf("%s pending", humanizeCount(m.Table.PendingStatsCollectionCount))
953953
if !m.Table.InitialStatsCollectionComplete {
954954
status = "loading"
955955
} else if m.Table.PendingStatsCollectionCount == 0 {
956956
status = "all loaded"
957957
}
958958
tableInfoContents := tableInfo{
959959
stats: status,
960-
backing: fmt.Sprintf("%s (%d)", humanize.Bytes.Uint64(m.Table.BackingTableSize), m.Table.BackingTableCount),
961-
zombie: fmt.Sprintf("%s (%d)", humanize.Bytes.Uint64(m.Table.ZombieSize), m.Table.ZombieCount),
962-
localZombie: humanize.Bytes.Uint64(m.Table.Local.ZombieSize).String(),
960+
backing: fmt.Sprintf("%s (%d)", humanizeBytes(m.Table.BackingTableSize), m.Table.BackingTableCount),
961+
zombie: fmt.Sprintf("%s (%d)", humanizeBytes(m.Table.ZombieSize), m.Table.ZombieCount),
962+
localZombie: humanizeBytes(m.Table.Local.ZombieSize).String(),
963963
}
964964
blobInfoContents := blobInfo{
965-
live: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.BlobFiles.LiveCount), humanize.Bytes.Uint64(m.BlobFiles.LiveSize)),
966-
zombie: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.BlobFiles.ZombieCount), humanize.Bytes.Uint64(m.BlobFiles.ZombieSize)),
967-
total: humanize.Bytes.Uint64(m.BlobFiles.ValueSize).String(),
968-
referenced: humanize.Bytes.Uint64(m.BlobFiles.ReferencedValueSize).String(),
965+
live: fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.LiveCount), humanizeBytes(m.BlobFiles.LiveSize)),
966+
zombie: fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.ZombieCount), humanizeBytes(m.BlobFiles.ZombieSize)),
967+
total: humanizeBytes(m.BlobFiles.ValueSize).String(),
968+
referenced: humanizeBytes(m.BlobFiles.ReferencedValueSize).String(),
969969
referencedPercent: fmt.Sprintf("%.0f%%", percent(m.BlobFiles.ReferencedValueSize, m.BlobFiles.ValueSize)),
970970
}
971971
fileInfoContents := tableAndBlobInfo{
@@ -988,13 +988,13 @@ func (m *Metrics) String() string {
988988
return m.manualMemory[purpose].InUseBytes
989989
}
990990
cgoMemInfoContents := cgoMemInfo{
991-
tot: humanize.Bytes.Uint64(inUseTotal).String(),
992-
bcTot: humanize.Bytes.Uint64(inUse(manual.BlockCacheData) +
991+
tot: humanizeBytes(inUseTotal).String(),
992+
bcTot: humanizeBytes(inUse(manual.BlockCacheData) +
993993
inUse(manual.BlockCacheMap) + inUse(manual.BlockCacheEntry)).String(),
994-
bcData: humanize.Bytes.Uint64(inUse(manual.BlockCacheData)).String(),
995-
bcMaps: humanize.Bytes.Uint64(inUse(manual.BlockCacheMap)).String(),
996-
bcEnts: humanize.Bytes.Uint64(inUse(manual.BlockCacheEntry)).String(),
997-
memtablesTot: humanize.Bytes.Uint64(inUse(manual.MemTable)).String(),
994+
bcData: humanizeBytes(inUse(manual.BlockCacheData)).String(),
995+
bcMaps: humanizeBytes(inUse(manual.BlockCacheMap)).String(),
996+
bcEnts: humanizeBytes(inUse(manual.BlockCacheEntry)).String(),
997+
memtablesTot: humanizeBytes(inUse(manual.MemTable)).String(),
998998
}
999999
cgoMemInfoIter := func(yield func(cgoMemInfo) bool) {
10001000
yield(cgoMemInfoContents)
@@ -1005,11 +1005,11 @@ func (m *Metrics) String() string {
10051005
}, max(cgoMemInfoTable.CumulativeFieldWidth, cur.Column()), -2)
10061006

10071007
compactionMetricsInfoContents := compactionMetricsInfo{
1008-
estimatedDebt: humanize.Bytes.Uint64(m.Compact.EstimatedDebt).String(),
1009-
inProgress: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.Compact.NumInProgress),
1010-
humanize.Bytes.Int64(m.Compact.InProgressBytes)),
1011-
cancelled: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.Compact.CancelledCount),
1012-
humanize.Bytes.Int64(m.Compact.CancelledBytes)),
1008+
estimatedDebt: humanizeBytes(m.Compact.EstimatedDebt).String(),
1009+
inProgress: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.NumInProgress),
1010+
humanizeBytes(m.Compact.InProgressBytes)),
1011+
cancelled: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.CancelledCount),
1012+
humanizeBytes(m.Compact.CancelledBytes)),
10131013
failed: m.Compact.FailedCount,
10141014
problemSpans: fmt.Sprintf("%d%s", m.Compact.NumProblemSpans, ifNonZero(m.Compact.NumProblemSpans, "!!")),
10151015
}
@@ -1022,11 +1022,11 @@ func (m *Metrics) String() string {
10221022
}, max(compactionInfoTable.CumulativeFieldWidth, cur.Column()), -2)
10231023

10241024
keysInfoContents := keysInfo{
1025-
rangeKeys: humanize.Count.Uint64(m.Keys.RangeKeySetsCount).String(),
1026-
tombstones: humanize.Count.Uint64(m.Keys.TombstoneCount).String(),
1025+
rangeKeys: humanizeCount(m.Keys.RangeKeySetsCount).String(),
1026+
tombstones: humanizeCount(m.Keys.TombstoneCount).String(),
10271027
missizedTombstones: fmt.Sprintf("%d%s", m.Keys.MissizedTombstonesCount, ifNonZero(m.Keys.MissizedTombstonesCount, "!!")),
1028-
pointDels: humanize.Bytes.Uint64(m.Table.Garbage.PointDeletionsBytesEstimate).String(),
1029-
rangeDels: humanize.Bytes.Uint64(m.Table.Garbage.RangeDeletionsBytesEstimate).String(),
1028+
pointDels: humanizeBytes(m.Table.Garbage.PointDeletionsBytesEstimate).String(),
1029+
rangeDels: humanizeBytes(m.Table.Garbage.RangeDeletionsBytesEstimate).String(),
10301030
}
10311031
keysInfoIter := func(yield func(keysInfo) bool) {
10321032
yield(keysInfoContents)
@@ -1040,7 +1040,7 @@ func (m *Metrics) String() string {
10401040
func(cur ascii.Cursor) {
10411041
maybePrintCompression := func(pos ascii.Cursor, name string, value int64) ascii.Cursor {
10421042
if value > 0 {
1043-
pos = pos.Printf(" %s %s", name, humanize.Count.Int64(value)).NewlineReturn()
1043+
pos = pos.Printf(" %s %s", name, humanizeCount(value)).NewlineReturn()
10441044
}
10451045
return pos
10461046
}
@@ -1117,3 +1117,11 @@ func (m *Metrics) updateLevelMetrics(updates levelMetricsDelta) {
11171117
}
11181118
}
11191119
}
1120+
1121+
func humanizeCount[T crhumanize.Integer](value T) crhumanize.SafeString {
1122+
return crhumanize.Count(value, crhumanize.Compact, crhumanize.OmitI)
1123+
}
1124+
1125+
func humanizeBytes[T crhumanize.Integer](value T) crhumanize.SafeString {
1126+
return crhumanize.Bytes(value, crhumanize.Compact, crhumanize.OmitI)
1127+
}

testdata/compaction/l0_to_lbase_compaction

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -35,24 +35,24 @@ metrics
3535
LSM | vtables | value sep | | ingested | amp
3636
level size | tables size | count size | refsz valblk | in | tables size | r w
3737
-----------------+--------------+--------------+---------------+--------+--------------+----------
38-
0 0 B | 0 0 B | 0 0 | 0 B 0 B | 4.5 Mi | 0 0 B | 0 1.32
39-
1 0 B | 0 0 B | 0 0 | 0 B 0 B | 0 B | 0 0 B | 0 0
40-
2 0 B | 0 0 B | 0 0 | 0 B 0 B | 0 B | 0 0 B | 0 0
41-
3 0 B | 0 0 B | 0 0 | 0 B 0 B | 0 B | 0 0 B | 0 0
42-
4 0 B | 0 0 B | 0 0 | 0 B 0 B | 0 B | 0 0 B | 0 0
43-
5 0 B | 0 0 B | 0 0 | 0 B 0 B | 0 B | 0 0 B | 0 0
44-
total 6 MiB | 3 6 MiB | 0 0 | 0 B 0 B | 0 B | 0 0 B | 1 0
38+
0 0B | 0 0B | 0 0 | 0B 0B | 4.5MB | 0 0B | 0 1.32
39+
1 0B | 0 0B | 0 0 | 0B 0B | 0B | 0 0B | 0 0
40+
2 0B | 0 0B | 0 0 | 0B 0B | 0B | 0 0B | 0 0
41+
3 0B | 0 0B | 0 0 | 0B 0B | 0B | 0 0B | 0 0
42+
4 0B | 0 0B | 0 0 | 0B 0B | 0B | 0 0B | 0 0
43+
5 0B | 0 0B | 0 0 | 0B 0B | 0B | 0 0B | 0 0
44+
total 6MB | 3 6MB | 0 0 | 0B 0B | 0B | 0 0B | 1 0
4545
--------------------------------------------------------------------------------------------------
4646
COMPACTIONS | moved | multilevel | read | written
4747
level | score ff cff | tables size | top in read | tables blob | tables sstsz blobsz
4848
------+-------------------+--------------+-------------------+--------------+---------------------
49-
0 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 3 6 MiB 0 B
50-
1 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
51-
2 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
52-
3 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
53-
4 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
54-
5 | 0 0 0 | 0 0 B | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
55-
total | 0 0.09 0.09 | 3 6 MiB | 0 B 0 B 0 B | 0 B 0 B | 0 0 B 0 B
49+
0 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 3 6MB 0B
50+
1 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 0 0B 0B
51+
2 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 0 0B 0B
52+
3 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 0 0B 0B
53+
4 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 0 0B 0B
54+
5 | 0 0 0 | 0 0B | 0B 0B 0B | 0B 0B | 0 0B 0B
55+
total | 0 0.09 0.09 | 3 6MB | 0B 0B 0B | 0B 0B | 0 0B 0B
5656
--------------------------------------------------------------------------------------------------
5757
kind | default delete elision move read tomb rewrite copy multi blob
5858
count | 0 0 0 3 0 0 0 0 0 0
@@ -61,7 +61,7 @@ COMMIT PIPELINE
6161
wals | memtables | ingestions
6262
files | written | overhead | flushes | live | zombie | total | flushable
6363
----------+------------+-----------+-----------+------------+------------+-----------+------------
64-
1 (0B) | 4.5MB: 4.5 | 0.0% | 2 | 1 (512KB) | 1 (512KB) | 9.5M | 0 (0B)
64+
1 (0B) | 4.5MB: 4.5 | 0.0% | 2 | 1 (512KB) | 1 (512KB) | 9.5 M | 0 (0B)
6565
--------------------------------------------------------------------------------------------------
6666
ITERATORS
6767
block cache | file cache | filter | sst iters | snapshots

0 commit comments

Comments
 (0)