fix: remove unsupported columns.
Removing columns unsupported for versions of Postgres < v14 in the database
and replication_slot collectors.
alessio-form3 committed Jun 21, 2024
1 parent c68de8e commit 19955f1
Showing 4 changed files with 10 additions and 68 deletions.
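
The columns removed below are not available on the older Postgres versions the commit message targets (per the message, anything before v14), so selecting them unconditionally makes both collectors fail there. As a rough sketch of the alternative this commit does not take, a collector could pick its query based on the connected server's version. The helper name, query strings, and version cutoff below are assumptions for illustration only, not code from this repository:

package example

import (
	"database/sql"
	"fmt"
	"strconv"
)

const (
	// Trimmed query, safe on older servers (mirrors the shape of the change below).
	slotQueryOld = `SELECT slot_name, slot_type, active, wal_status FROM pg_replication_slots`
	// Extended query that also reads safe_wal_size on servers that expose it.
	slotQueryNew = `SELECT slot_name, slot_type, active, safe_wal_size, wal_status FROM pg_replication_slots`
)

// pickSlotQuery chooses a query string based on server_version_num
// (e.g. "130011" for 13.11, "140005" for 14.5). The >= 14 cutoff follows
// the commit message and is an assumption, not exporter logic.
func pickSlotQuery(db *sql.DB) (string, error) {
	var raw string
	if err := db.QueryRow("SHOW server_version_num").Scan(&raw); err != nil {
		return "", err
	}
	v, err := strconv.Atoi(raw)
	if err != nil {
		return "", fmt.Errorf("unexpected server_version_num %q: %w", raw, err)
	}
	if v >= 140000 {
		return slotQueryNew, nil
	}
	return slotQueryOld, nil
}
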
20 changes: 1 addition & 19 deletions collector/pg_replication_slot.go
@@ -63,15 +63,6 @@ var (
"whether the replication slot is active or not",
[]string{"slot_name", "slot_type"}, nil,
)
pgReplicationSlotSafeWal = prometheus.NewDesc(
prometheus.BuildFQName(
namespace,
replicationSlotSubsystem,
"safe_wal_size_bytes",
),
"number of bytes that can be written to WAL such that this slot is not in danger of getting in state lost",
[]string{"slot_name", "slot_type"}, nil,
)
pgReplicationSlotWalStatus = prometheus.NewDesc(
prometheus.BuildFQName(
namespace,
@@ -92,7 +83,6 @@ var (
END AS current_wal_lsn,
COALESCE(confirmed_flush_lsn, '0/0') - '0/0' AS confirmed_flush_lsn,
active,
safe_wal_size,
wal_status
FROM pg_replication_slots;`
)
@@ -112,9 +102,8 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance
var walLSN sql.NullFloat64
var flushLSN sql.NullFloat64
var isActive sql.NullBool
var safeWalSize sql.NullInt64
var walStatus sql.NullString
if err := rows.Scan(&slotName, &slotType, &walLSN, &flushLSN, &isActive, &safeWalSize, &walStatus); err != nil {
if err := rows.Scan(&slotName, &slotType, &walLSN, &flushLSN, &isActive, &walStatus); err != nil {
return err
}

@@ -154,13 +143,6 @@ func (PGReplicationSlotCollector) Update(ctx context.Context, instance *instance
prometheus.GaugeValue, isActiveValue, slotNameLabel, slotTypeLabel,
)

if safeWalSize.Valid {
ch <- prometheus.MustNewConstMetric(
pgReplicationSlotSafeWal,
prometheus.GaugeValue, float64(safeWalSize.Int64), slotNameLabel, slotTypeLabel,
)
}

if walStatus.Valid {
ch <- prometheus.MustNewConstMetric(
pgReplicationSlotWalStatus,
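
To confirm what a particular server actually exposes before wiring up a metric, one can list the columns of pg_replication_slots straight from the catalog. This standalone check is not part of the change above; the driver import, connection handling, and function name are assumptions:

package example

import (
	"database/sql"

	_ "github.com/lib/pq" // assumed driver; any database/sql Postgres driver works
)

// replicationSlotColumns lists the columns of pg_replication_slots on the
// connected server, e.g. to verify whether safe_wal_size exists there.
func replicationSlotColumns(connStr string) ([]string, error) {
	db, err := sql.Open("postgres", connStr)
	if err != nil {
		return nil, err
	}
	defer db.Close()

	rows, err := db.Query(`
		SELECT attname
		FROM pg_attribute
		WHERE attrelid = 'pg_catalog.pg_replication_slots'::regclass
		  AND attnum > 0
		  AND NOT attisdropped
		ORDER BY attnum`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var cols []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		cols = append(cols, name)
	}
	return cols, rows.Err()
}
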
18 changes: 8 additions & 10 deletions collector/pg_replication_slot_test.go
@@ -31,9 +31,9 @@ func TestPgReplicationSlotCollectorActive(t *testing.T) {

inst := &instance{db: db}

columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"}
columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"}
rows := sqlmock.NewRows(columns).
AddRow("test_slot", "physical", 5, 3, true, 323906992, "reserved")
AddRow("test_slot", "physical", 5, 3, true, "reserved")
mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

ch := make(chan prometheus.Metric)
@@ -50,7 +50,6 @@ func TestPgReplicationSlotCollectorActive(t *testing.T) {
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 5, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 3, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 1, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 323906992, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "reserved"}, value: 1, metricType: dto.MetricType_GAUGE},
}

@@ -74,9 +73,9 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) {

inst := &instance{db: db}

columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"}
columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"}
rows := sqlmock.NewRows(columns).
AddRow("test_slot", "physical", 6, 12, false, -4000, "extended")
AddRow("test_slot", "physical", 6, 12, false, "extended")
mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

ch := make(chan prometheus.Metric)
@@ -92,7 +91,6 @@ func TestPgReplicationSlotCollectorInActive(t *testing.T) {
expected := []MetricResult{
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 6, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: 0, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical"}, value: -4000, metricType: dto.MetricType_GAUGE},
{labels: labelMap{"slot_name": "test_slot", "slot_type": "physical", "wal_status": "extended"}, value: 1, metricType: dto.MetricType_GAUGE},
}

@@ -117,9 +115,9 @@ func TestPgReplicationSlotCollectorActiveNil(t *testing.T) {

inst := &instance{db: db}

columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"}
columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"}
rows := sqlmock.NewRows(columns).
AddRow("test_slot", "physical", 6, 12, nil, nil, "lost")
AddRow("test_slot", "physical", 6, 12, nil, "lost")
mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

ch := make(chan prometheus.Metric)
@@ -158,9 +156,9 @@ func TestPgReplicationSlotCollectorTestNilValues(t *testing.T) {

inst := &instance{db: db}

columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "safe_wal_size", "wal_status"}
columns := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"}
rows := sqlmock.NewRows(columns).
AddRow(nil, nil, nil, nil, true, nil, nil)
AddRow(nil, nil, nil, nil, true, nil)
mock.ExpectQuery(sanitizeQuery(pgReplicationSlotQuery)).WillReturnRows(rows)

ch := make(chan prometheus.Metric)
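
The test updates above all follow from one detail of go-sqlmock: AddRow values are matched to the declared column slice purely by position, so once safe_wal_size leaves the query and the Scan destination list, every fixture row has to shed its value as well or the scan fails on a column-count mismatch. A minimal standalone illustration (test name and query literal are assumptions, not the repository's tests):

package example

import (
	"testing"

	sqlmock "github.com/DATA-DOG/go-sqlmock"
)

func TestTrimmedSlotFixture(t *testing.T) {
	db, mock, err := sqlmock.New()
	if err != nil {
		t.Fatal(err)
	}
	defer db.Close()

	// Six declared columns, so each AddRow must supply exactly six values.
	cols := []string{"slot_name", "slot_type", "current_wal_lsn", "confirmed_flush_lsn", "active", "wal_status"}
	mock.ExpectQuery("SELECT (.+) FROM pg_replication_slots").
		WillReturnRows(sqlmock.NewRows(cols).
			AddRow("test_slot", "physical", 5, 3, true, "reserved"))

	rows, err := db.Query("SELECT slot_name, slot_type, current_wal_lsn, confirmed_flush_lsn, active, wal_status FROM pg_replication_slots")
	if err != nil {
		t.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name, slotType, walStatus string
		var walLSN, flushLSN float64
		var active bool
		// A stale seventh fixture value (the old safe_wal_size) would fail here.
		if err := rows.Scan(&name, &slotType, &walLSN, &flushLSN, &active, &walStatus); err != nil {
			t.Fatal(err)
		}
	}
	if err := mock.ExpectationsWereMet(); err != nil {
		t.Fatal(err)
	}
}
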
24 changes: 1 addition & 23 deletions collector/pg_stat_database.go
@@ -206,15 +206,6 @@ var (
[]string{"datid", "datname"},
prometheus.Labels{},
)
statDatabaseActiveTime = prometheus.NewDesc(prometheus.BuildFQName(
namespace,
statDatabaseSubsystem,
"active_time_seconds_total",
),
"Time spent executing SQL statements in this database, in seconds",
[]string{"datid", "datname"},
prometheus.Labels{},
)

statDatabaseQuery = `
SELECT
@@ -236,7 +227,6 @@ var (
,deadlocks
,blk_read_time
,blk_write_time
,active_time
,stats_reset
FROM pg_stat_database;
`
@@ -254,7 +244,7 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance

for rows.Next() {
var datid, datname sql.NullString
var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime, activeTime sql.NullFloat64
var numBackends, xactCommit, xactRollback, blksRead, blksHit, tupReturned, tupFetched, tupInserted, tupUpdated, tupDeleted, conflicts, tempFiles, tempBytes, deadlocks, blkReadTime, blkWriteTime sql.NullFloat64
var statsReset sql.NullTime

err := rows.Scan(
@@ -276,7 +266,6 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
&deadlocks,
&blkReadTime,
&blkWriteTime,
&activeTime,
&statsReset,
)
if err != nil {
@@ -355,10 +344,6 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no blk_write_time")
continue
}
if !activeTime.Valid {
level.Debug(c.log).Log("msg", "Skipping collecting metric because it has no active_time")
continue
}

statsResetMetric := 0.0
if !statsReset.Valid {
@@ -482,13 +467,6 @@ func (c *PGStatDatabaseCollector) Update(ctx context.Context, instance *instance
labels...,
)

ch <- prometheus.MustNewConstMetric(
statDatabaseActiveTime,
prometheus.CounterValue,
activeTime.Float64/1000.0,
labels...,
)

ch <- prometheus.MustNewConstMetric(
statDatabaseStatsReset,
prometheus.CounterValue,
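
One detail worth keeping in mind about the removed statDatabaseActiveTime metric: pg_stat_database reports active_time in milliseconds, and the deleted code divided by 1000 to export seconds. That conversion is why the test changes below drop paired values such as 33 and 0.033 (and 32/0.032, 14/0.014, 15/0.015, 7/0.007). A one-line restatement of the removed conversion, with an assumed helper name:

package example

// activeTimeSeconds mirrors the conversion in the removed metric emission
// (activeTime.Float64 / 1000.0): milliseconds from pg_stat_database to seconds.
func activeTimeSeconds(activeTimeMillis float64) float64 {
	return activeTimeMillis / 1000.0
}
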
16 changes: 0 additions & 16 deletions collector/pg_stat_database_test.go
@@ -52,7 +52,6 @@ func TestPGStatDatabaseCollector(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}

@@ -81,7 +80,6 @@ func TestPGStatDatabaseCollector(t *testing.T) {
925,
16,
823,
33,
srT)

mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -115,7 +113,6 @@ func TestPGStatDatabaseCollector(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.033},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}

@@ -162,7 +159,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}

@@ -186,7 +182,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
925,
16,
823,
32,
srT).
AddRow(
"pid",
@@ -207,7 +202,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
925,
16,
823,
32,
srT)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)

@@ -240,7 +234,6 @@ func TestPGStatDatabaseCollectorNullValues(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.032},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}

@@ -282,7 +275,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}

@@ -311,7 +303,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
925,
16,
823,
14,
srT).
AddRow(
nil,
@@ -333,7 +324,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
nil,
nil,
nil,
nil,
).
AddRow(
"pid",
Expand All @@ -354,7 +344,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
926,
17,
824,
15,
srT)
mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)

@@ -387,7 +376,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.014},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},

{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_GAUGE, value: 355},
@@ -406,7 +394,6 @@ func TestPGStatDatabaseCollectorRowLeakTest(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 926},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 17},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 824},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.015},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 1685059842},
}

@@ -449,7 +436,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
"deadlocks",
"blk_read_time",
"blk_write_time",
"active_time",
"stats_reset",
}

@@ -473,7 +459,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
925,
16,
823,
7,
nil)

mock.ExpectQuery(sanitizeQuery(statDatabaseQuery)).WillReturnRows(rows)
@@ -507,7 +492,6 @@ func TestPGStatDatabaseCollectorTestNilStatReset(t *testing.T) {
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 925},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 16},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 823},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0.007},
{labels: labelMap{"datid": "pid", "datname": "postgres"}, metricType: dto.MetricType_COUNTER, value: 0},
}

