@@ -12,11 +12,11 @@ import (
 	"time"
 	"unsafe"

+	"github.com/cockroachdb/crlib/crhumanize"
 	"github.com/cockroachdb/pebble/internal/ascii"
 	"github.com/cockroachdb/pebble/internal/ascii/table"
 	"github.com/cockroachdb/pebble/internal/base"
 	"github.com/cockroachdb/pebble/internal/cache"
-	"github.com/cockroachdb/pebble/internal/humanize"
 	"github.com/cockroachdb/pebble/internal/manifest"
 	"github.com/cockroachdb/pebble/internal/manual"
 	"github.com/cockroachdb/pebble/objstorage/objstorageprovider/sharedcache"
@@ -911,16 +911,16 @@ func (m *Metrics) String() string {

 	commitPipelineInfoContents := commitPipelineInfo{
 		// wals.
-		files: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.WAL.Files), humanize.Bytes.Uint64(m.WAL.Size)),
-		written: fmt.Sprintf("%s: %s", humanize.Bytes.Uint64(m.WAL.BytesIn), humanize.Bytes.Uint64(m.WAL.BytesWritten)),
+		files: fmt.Sprintf("%s (%s)", humanizeCount(m.WAL.Files), humanizeBytes(m.WAL.Size)),
+		written: fmt.Sprintf("%s: %s", humanizeBytes(m.WAL.BytesIn), humanizeBytes(m.WAL.BytesWritten)),
 		overhead: fmt.Sprintf("%.1f%%", percent(int64(m.WAL.BytesWritten)-int64(m.WAL.BytesIn), int64(m.WAL.BytesIn))),
 		// memtables.
-		flushes: humanize.Count.Int64(m.Flush.Count).String(),
-		live: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.MemTable.Count), humanize.Bytes.Uint64(m.MemTable.Size)),
-		zombie: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.MemTable.ZombieCount), humanize.Bytes.Uint64(m.MemTable.ZombieSize)),
+		flushes: crhumanize.Count(m.Flush.Count).String(),
+		live: fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.Count), humanizeBytes(m.MemTable.Size)),
+		zombie: fmt.Sprintf("%s (%s)", humanizeCount(m.MemTable.ZombieCount), humanizeBytes(m.MemTable.ZombieSize)),
 		// ingestions.
-		total: humanize.Count.Uint64(m.WAL.BytesIn + m.WAL.BytesWritten).String(),
-		flushable: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.Flush.AsIngestCount), humanize.Bytes.Uint64(m.Flush.AsIngestBytes)),
+		total: crhumanize.Count(m.WAL.BytesIn + m.WAL.BytesWritten).String(),
+		flushable: fmt.Sprintf("%s (%s)", humanizeCount(m.Flush.AsIngestCount), humanizeBytes(m.Flush.AsIngestBytes)),
 	}
 	commitPipelineInfoIter := func(yield func(commitPipelineInfo) bool) {
 		yield(commitPipelineInfoContents)
@@ -932,13 +932,13 @@ func (m *Metrics) String() string {
 	}, max(commitPipelineInfoTable.CumulativeFieldWidth, cur.Column()), -3)

 	iteratorInfoContents := iteratorInfo{
-		bcEntries: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.BlockCache.Count), humanize.Bytes.Int64(m.BlockCache.Size)),
+		bcEntries: fmt.Sprintf("%s (%s)", humanizeCount(m.BlockCache.Count), humanizeBytes(m.BlockCache.Size)),
 		bcHitRate: fmt.Sprintf("%.1f%%", hitRate(m.BlockCache.Hits, m.BlockCache.Misses)),
-		fcEntries: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.FileCache.TableCount), humanize.Bytes.Int64(m.FileCache.Size)),
+		fcEntries: fmt.Sprintf("%s (%s)", humanizeCount(m.FileCache.TableCount), humanizeBytes(m.FileCache.Size)),
 		fcHitRate: fmt.Sprintf("%.1f%%", hitRate(m.FileCache.Hits, m.FileCache.Misses)),
 		bloomFilterUtil: fmt.Sprintf("%.1f%%", hitRate(m.Filter.Hits, m.Filter.Misses)),
-		sstableItersOpen: humanize.Count.Int64(m.TableIters).String(),
-		snapshotsOpen: humanize.Count.Uint64(uint64(m.Snapshots.Count)).String(),
+		sstableItersOpen: humanizeCount(m.TableIters).String(),
+		snapshotsOpen: humanizeCount(m.Snapshots.Count).String(),
 	}
 	iteratorInfoIter := func(yield func(iteratorInfo) bool) {
 		yield(iteratorInfoContents)
@@ -949,23 +949,23 @@ func (m *Metrics) String() string {
 		return iteratorInfoTable.Render(cur, table.RenderOptions{}, iteratorInfoIter)
 	}, max(iteratorInfoTable.CumulativeFieldWidth, cur.Column()), -3)

-	status := fmt.Sprintf("%s pending", humanize.Count.Int64(m.Table.PendingStatsCollectionCount))
+	status := fmt.Sprintf("%s pending", humanizeCount(m.Table.PendingStatsCollectionCount))
 	if !m.Table.InitialStatsCollectionComplete {
 		status = "loading"
 	} else if m.Table.PendingStatsCollectionCount == 0 {
 		status = "all loaded"
 	}
 	tableInfoContents := tableInfo{
 		stats: status,
-		backing: fmt.Sprintf("%s (%d)", humanize.Bytes.Uint64(m.Table.BackingTableSize), m.Table.BackingTableCount),
-		zombie: fmt.Sprintf("%s (%d)", humanize.Bytes.Uint64(m.Table.ZombieSize), m.Table.ZombieCount),
-		localZombie: humanize.Bytes.Uint64(m.Table.Local.ZombieSize).String(),
+		backing: fmt.Sprintf("%s (%d)", humanizeBytes(m.Table.BackingTableSize), m.Table.BackingTableCount),
+		zombie: fmt.Sprintf("%s (%d)", humanizeBytes(m.Table.ZombieSize), m.Table.ZombieCount),
+		localZombie: humanizeBytes(m.Table.Local.ZombieSize).String(),
 	}
 	blobInfoContents := blobInfo{
-		live: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.BlobFiles.LiveCount), humanize.Bytes.Uint64(m.BlobFiles.LiveSize)),
-		zombie: fmt.Sprintf("%s (%s)", humanize.Count.Uint64(m.BlobFiles.ZombieCount), humanize.Bytes.Uint64(m.BlobFiles.ZombieSize)),
-		total: humanize.Bytes.Uint64(m.BlobFiles.ValueSize).String(),
-		referenced: humanize.Bytes.Uint64(m.BlobFiles.ReferencedValueSize).String(),
+		live: fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.LiveCount), humanizeBytes(m.BlobFiles.LiveSize)),
+		zombie: fmt.Sprintf("%s (%s)", humanizeCount(m.BlobFiles.ZombieCount), humanizeBytes(m.BlobFiles.ZombieSize)),
+		total: humanizeBytes(m.BlobFiles.ValueSize).String(),
+		referenced: humanizeBytes(m.BlobFiles.ReferencedValueSize).String(),
 		referencedPercent: fmt.Sprintf("%.0f%%", percent(m.BlobFiles.ReferencedValueSize, m.BlobFiles.ValueSize)),
 	}
 	fileInfoContents := tableAndBlobInfo{
@@ -988,13 +988,13 @@ func (m *Metrics) String() string {
 		return m.manualMemory[purpose].InUseBytes
 	}
 	cgoMemInfoContents := cgoMemInfo{
-		tot: humanize.Bytes.Uint64(inUseTotal).String(),
-		bcTot: humanize.Bytes.Uint64(inUse(manual.BlockCacheData) +
+		tot: humanizeBytes(inUseTotal).String(),
+		bcTot: humanizeBytes(inUse(manual.BlockCacheData) +
 			inUse(manual.BlockCacheMap) + inUse(manual.BlockCacheEntry)).String(),
-		bcData: humanize.Bytes.Uint64(inUse(manual.BlockCacheData)).String(),
-		bcMaps: humanize.Bytes.Uint64(inUse(manual.BlockCacheMap)).String(),
-		bcEnts: humanize.Bytes.Uint64(inUse(manual.BlockCacheEntry)).String(),
-		memtablesTot: humanize.Bytes.Uint64(inUse(manual.MemTable)).String(),
+		bcData: humanizeBytes(inUse(manual.BlockCacheData)).String(),
+		bcMaps: humanizeBytes(inUse(manual.BlockCacheMap)).String(),
+		bcEnts: humanizeBytes(inUse(manual.BlockCacheEntry)).String(),
+		memtablesTot: humanizeBytes(inUse(manual.MemTable)).String(),
 	}
 	cgoMemInfoIter := func(yield func(cgoMemInfo) bool) {
 		yield(cgoMemInfoContents)
@@ -1005,11 +1005,11 @@ func (m *Metrics) String() string {
 	}, max(cgoMemInfoTable.CumulativeFieldWidth, cur.Column()), -2)

 	compactionMetricsInfoContents := compactionMetricsInfo{
-		estimatedDebt: humanize.Bytes.Uint64(m.Compact.EstimatedDebt).String(),
-		inProgress: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.Compact.NumInProgress),
-			humanize.Bytes.Int64(m.Compact.InProgressBytes)),
-		cancelled: fmt.Sprintf("%s (%s)", humanize.Count.Int64(m.Compact.CancelledCount),
-			humanize.Bytes.Int64(m.Compact.CancelledBytes)),
+		estimatedDebt: humanizeBytes(m.Compact.EstimatedDebt).String(),
+		inProgress: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.NumInProgress),
+			humanizeBytes(m.Compact.InProgressBytes)),
+		cancelled: fmt.Sprintf("%s (%s)", humanizeCount(m.Compact.CancelledCount),
+			humanizeBytes(m.Compact.CancelledBytes)),
 		failed: m.Compact.FailedCount,
 		problemSpans: fmt.Sprintf("%d%s", m.Compact.NumProblemSpans, ifNonZero(m.Compact.NumProblemSpans, "!!")),
 	}
@@ -1022,11 +1022,11 @@ func (m *Metrics) String() string {
 	}, max(compactionInfoTable.CumulativeFieldWidth, cur.Column()), -2)

 	keysInfoContents := keysInfo{
-		rangeKeys: humanize.Count.Uint64(m.Keys.RangeKeySetsCount).String(),
-		tombstones: humanize.Count.Uint64(m.Keys.TombstoneCount).String(),
+		rangeKeys: humanizeCount(m.Keys.RangeKeySetsCount).String(),
+		tombstones: humanizeCount(m.Keys.TombstoneCount).String(),
 		missizedTombstones: fmt.Sprintf("%d%s", m.Keys.MissizedTombstonesCount, ifNonZero(m.Keys.MissizedTombstonesCount, "!!")),
-		pointDels: humanize.Bytes.Uint64(m.Table.Garbage.PointDeletionsBytesEstimate).String(),
-		rangeDels: humanize.Bytes.Uint64(m.Table.Garbage.RangeDeletionsBytesEstimate).String(),
+		pointDels: humanizeBytes(m.Table.Garbage.PointDeletionsBytesEstimate).String(),
+		rangeDels: humanizeBytes(m.Table.Garbage.RangeDeletionsBytesEstimate).String(),
 	}
 	keysInfoIter := func(yield func(keysInfo) bool) {
 		yield(keysInfoContents)
@@ -1040,7 +1040,7 @@ func (m *Metrics) String() string {
 	func(cur ascii.Cursor) {
 		maybePrintCompression := func(pos ascii.Cursor, name string, value int64) ascii.Cursor {
 			if value > 0 {
-				pos = pos.Printf(" %s %s", name, humanize.Count.Int64(value)).NewlineReturn()
+				pos = pos.Printf(" %s %s", name, humanizeCount(value)).NewlineReturn()
 			}
 			return pos
 		}
@@ -1117,3 +1117,11 @@ func (m *Metrics) updateLevelMetrics(updates levelMetricsDelta) {
 		}
 	}
 }
+
+func humanizeCount[T crhumanize.Integer](value T) crhumanize.SafeString {
+	return crhumanize.Count(value, crhumanize.Compact, crhumanize.OmitI)
+}
+
+func humanizeBytes[T crhumanize.Integer](value T) crhumanize.SafeString {
+	return crhumanize.Bytes(value, crhumanize.Compact, crhumanize.OmitI)
+}
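
A minimal standalone sketch of how the new wrappers from the last hunk are used: both helpers apply the same crhumanize options (Compact, OmitI) so every count and byte value in the metrics output is formatted consistently. The sample values below are hypothetical; in metrics.go the inputs come from Metrics fields, and the exact rendered strings depend on crhumanize's formatting.

package main

import (
	"fmt"

	"github.com/cockroachdb/crlib/crhumanize"
)

// Same wrappers as added at the end of metrics.go in this diff.
func humanizeCount[T crhumanize.Integer](value T) crhumanize.SafeString {
	return crhumanize.Count(value, crhumanize.Compact, crhumanize.OmitI)
}

func humanizeBytes[T crhumanize.Integer](value T) crhumanize.SafeString {
	return crhumanize.Bytes(value, crhumanize.Compact, crhumanize.OmitI)
}

func main() {
	// Hypothetical inputs standing in for m.WAL.Files and m.WAL.Size.
	var walFiles int64 = 3
	var walSize uint64 = 128 << 20

	// Mirrors the "%s (%s)" pattern used for the files field of commitPipelineInfo.
	fmt.Printf("%s (%s)\n", humanizeCount(walFiles), humanizeBytes(walSize))
}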