From aa3f68aedc31d6ac39b933d5c6f1ad7d91cc9e6f Mon Sep 17 00:00:00 2001
From: Mohammad Aziz
Date: Sun, 30 Nov 2025 18:56:00 +0530
Subject: [PATCH] Add disk blocks read per second

---
 app/services/metrics/metrics.go      |  1 +
 domain/metrics/metrics.go            |  1 +
 internal/pgmetrics/collector.go      | 15 +++++++++++++++
 internal/pgmetrics/collector_test.go | 10 ++++++----
 4 files changed, 23 insertions(+), 4 deletions(-)

diff --git a/app/services/metrics/metrics.go b/app/services/metrics/metrics.go
index daa0dc0..cec704c 100644
--- a/app/services/metrics/metrics.go
+++ b/app/services/metrics/metrics.go
@@ -144,6 +144,7 @@ func (mp *metricspusher) Push(cred credential.Credential) error {
 		ReplicationLagSeconds: dbMetrics.ReplicationLagSeconds,
 		CacheHitRatio:         dbMetrics.CacheHitRatio,
 		TransactionsPerSecond: dbMetrics.TransactionsPerSecond,
+		BlocksReadPerSecond:   dbMetrics.BlocksReadPerSecond,
 	}
 
 	return mp.apiserver.PushPostgreSQLMetrics(ctx, combined, agentID)
diff --git a/domain/metrics/metrics.go b/domain/metrics/metrics.go
index 6fe3338..b1015e5 100644
--- a/domain/metrics/metrics.go
+++ b/domain/metrics/metrics.go
@@ -14,4 +14,5 @@ type PostgreSQLMetrics struct {
 	ReplicationLagSeconds int     `json:"replication_lag_seconds"`
 	CacheHitRatio         float64 `json:"cache_hit_ratio"`
 	TransactionsPerSecond float64 `json:"transactions_per_second"`
+	BlocksReadPerSecond   float64 `json:"blocks_read_per_second"`
 }
diff --git a/internal/pgmetrics/collector.go b/internal/pgmetrics/collector.go
index d752184..929e748 100644
--- a/internal/pgmetrics/collector.go
+++ b/internal/pgmetrics/collector.go
@@ -16,6 +16,7 @@ type DatabaseMetrics struct {
 	MaxConnections        int
 	CacheHitRatio         float64
 	TransactionsPerSecond float64
+	BlocksReadPerSecond   float64
 	ReplicationLagSeconds int
 }
 
@@ -113,6 +114,20 @@ func (pgm pgmetrics) collectDatabaseMetrics(ctx context.Context, db *sql.DB, m *
 		m.TransactionsPerSecond = 0
 	}
 
+	// Blocks read per second
+	blocksQuery := `
+		SELECT
+			COALESCE(ROUND(
+				sum(blks_read)::numeric /
+				NULLIF(EXTRACT(EPOCH FROM (now() - min(stats_reset))), 0)
+			, 2), 0) as blocks_read_per_sec
+		FROM pg_stat_database
+		WHERE stats_reset IS NOT NULL;
+	`
+	if err := db.QueryRowContext(ctx, blocksQuery).Scan(&m.BlocksReadPerSecond); err != nil {
+		m.BlocksReadPerSecond = 0
+	}
+
 	return nil
 }
 
diff --git a/internal/pgmetrics/collector_test.go b/internal/pgmetrics/collector_test.go
index cbdc051..8b51ed5 100644
--- a/internal/pgmetrics/collector_test.go
+++ b/internal/pgmetrics/collector_test.go
@@ -109,6 +109,9 @@ func TestCollector_Collect(t *testing.T) {
 	// Verify connections are being tracked
 	assert.GreaterOrEqual(t, metrics.ConnectionsTotal, 1, "should have at least 1 connection")
 
+	// Verify max connections is set (default is typically 100)
+	assert.Greater(t, metrics.MaxConnections, 0, "max_connections should be greater than 0")
+
 	// Verify cache hit ratio is calculated (should be between 0 and 100)
 	assert.GreaterOrEqual(t, metrics.CacheHitRatio, 0.0)
 	assert.LessOrEqual(t, metrics.CacheHitRatio, 100.0)
@@ -116,10 +119,11 @@ func TestCollector_Collect(t *testing.T) {
 	// TPS might be 0 for a new database without stats_reset or very low activity
 	assert.GreaterOrEqual(t, metrics.TransactionsPerSecond, 0.0)
 
+	// Blocks read per second might be 0 for a new database without stats_reset
+	assert.GreaterOrEqual(t, metrics.BlocksReadPerSecond, 0.0)
+
 	// Replication lag should be 0 when no replication is configured
 	assert.Equal(t, 0, metrics.ReplicationLagSeconds)
-
-	t.Logf("Collected metrics: %+v", metrics)
 }
 
 func TestCollector_Collect_InvalidCredentials(t *testing.T) {
@@ -154,8 +158,6 @@ func TestCollector_Collect_CacheHitRatio(t *testing.T) {
 
 	// It should be high since we're reading the same data multiple times
 	assert.Greater(t, metrics.CacheHitRatio, 0.0, "cache hit ratio should be greater than 0 after queries")
-
-	t.Logf("Cache hit ratio: %.2f%%", metrics.CacheHitRatio)
 }
 
 func TestCollector_Collect_Timeout(t *testing.T) {