 
 	"github.com/prometheus/alertmanager/types"
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/prometheus/client_golang/prometheus/testutil"
 	"github.com/stretchr/testify/require"
 )
@@ -220,48 +221,37 @@ type nflogMetrics struct {
 func newNflogMetrics(r prometheus.Registerer) *nflogMetrics {
 	m := &nflogMetrics{}
 
-	m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+	m.gcDuration = promauto.With(r).NewSummary(prometheus.SummaryOpts{
 		Name:       "alertmanager_nflog_gc_duration_seconds",
 		Help:       "Duration of the last notification log garbage collection cycle.",
 		Objectives: map[float64]float64{},
 	})
-	m.snapshotDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+	m.snapshotDuration = promauto.With(r).NewSummary(prometheus.SummaryOpts{
 		Name:       "alertmanager_nflog_snapshot_duration_seconds",
 		Help:       "Duration of the last notification log snapshot.",
 		Objectives: map[float64]float64{},
 	})
-	m.snapshotSize = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.snapshotSize = promauto.With(r).NewGauge(prometheus.GaugeOpts{
 		Name: "alertmanager_nflog_snapshot_size_bytes",
 		Help: "Size of the last notification log snapshot in bytes.",
 	})
-	m.queriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.queriesTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_nflog_queries_total",
 		Help: "Number of notification log queries were received.",
 	})
-	m.queryErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.queryErrorsTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_nflog_query_errors_total",
 		Help: "Number notification log received queries that failed.",
 	})
-	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.queryDuration = promauto.With(r).NewHistogram(prometheus.HistogramOpts{
 		Name: "alertmanager_nflog_query_duration_seconds",
 		Help: "Duration of notification log query evaluation.",
 	})
-	m.propagatedMessagesTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.propagatedMessagesTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_nflog_gossip_messages_propagated_total",
 		Help: "Number of received gossip messages that have been further gossiped.",
 	})
 
-	if r != nil {
-		r.MustRegister(
-			m.gcDuration,
-			m.snapshotDuration,
-			m.snapshotSize,
-			m.queriesTotal,
-			m.queryErrorsTotal,
-			m.queryDuration,
-			m.propagatedMessagesTotal,
-		)
-	}
 	return m
 }
 
@@ -282,66 +272,52 @@ type silenceMetrics struct {
 func newSilenceMetrics(r prometheus.Registerer) *silenceMetrics {
 	m := &silenceMetrics{}
 
-	m.gcDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+	m.gcDuration = promauto.With(r).NewSummary(prometheus.SummaryOpts{
 		Name:       "alertmanager_silences_gc_duration_seconds",
 		Help:       "Duration of the last silence garbage collection cycle.",
 		Objectives: map[float64]float64{},
 	})
-	m.snapshotDuration = prometheus.NewSummary(prometheus.SummaryOpts{
+	m.snapshotDuration = promauto.With(r).NewSummary(prometheus.SummaryOpts{
 		Name:       "alertmanager_silences_snapshot_duration_seconds",
 		Help:       "Duration of the last silence snapshot.",
 		Objectives: map[float64]float64{},
 	})
-	m.snapshotSize = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.snapshotSize = promauto.With(r).NewGauge(prometheus.GaugeOpts{
 		Name: "alertmanager_silences_snapshot_size_bytes",
 		Help: "Size of the last silence snapshot in bytes.",
 	})
-	m.queriesTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.queriesTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_silences_queries_total",
 		Help: "How many silence queries were received.",
 	})
-	m.queryErrorsTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.queryErrorsTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_silences_query_errors_total",
 		Help: "How many silence received queries did not succeed.",
 	})
-	m.queryDuration = prometheus.NewHistogram(prometheus.HistogramOpts{
+	m.queryDuration = promauto.With(r).NewHistogram(prometheus.HistogramOpts{
 		Name: "alertmanager_silences_query_duration_seconds",
 		Help: "Duration of silence query evaluation.",
 	})
-	m.propagatedMessagesTotal = prometheus.NewCounter(prometheus.CounterOpts{
+	m.propagatedMessagesTotal = promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name: "alertmanager_silences_gossip_messages_propagated_total",
 		Help: "Number of received gossip messages that have been further gossiped.",
 	})
-	m.silencesActive = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.silencesActive = promauto.With(r).NewGauge(prometheus.GaugeOpts{
 		Name:        "alertmanager_silences",
 		Help:        "How many silences by state.",
 		ConstLabels: prometheus.Labels{"state": string(types.SilenceStateActive)},
 	})
-	m.silencesPending = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.silencesPending = promauto.With(r).NewGauge(prometheus.GaugeOpts{
 		Name:        "alertmanager_silences",
 		Help:        "How many silences by state.",
 		ConstLabels: prometheus.Labels{"state": string(types.SilenceStatePending)},
 	})
-	m.silencesExpired = prometheus.NewGauge(prometheus.GaugeOpts{
+	m.silencesExpired = promauto.With(r).NewGauge(prometheus.GaugeOpts{
 		Name:        "alertmanager_silences",
 		Help:        "How many silences by state.",
 		ConstLabels: prometheus.Labels{"state": string(types.SilenceStateExpired)},
 	})
 
-	if r != nil {
-		r.MustRegister(
-			m.gcDuration,
-			m.snapshotDuration,
-			m.snapshotSize,
-			m.queriesTotal,
-			m.queryErrorsTotal,
-			m.queryDuration,
-			m.silencesActive,
-			m.silencesPending,
-			m.silencesExpired,
-			m.propagatedMessagesTotal,
-		)
-	}
 	return m
 }
 
@@ -354,17 +330,17 @@ type notifyMetrics struct {
 
 func newNotifyMetrics(r prometheus.Registerer) *notifyMetrics {
 	m := &notifyMetrics{
-		numNotifications: prometheus.NewCounterVec(prometheus.CounterOpts{
+		numNotifications: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "alertmanager",
 			Name:      "notifications_total",
 			Help:      "The total number of attempted notifications.",
 		}, []string{"integration"}),
-		numFailedNotifications: prometheus.NewCounterVec(prometheus.CounterOpts{
+		numFailedNotifications: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
 			Namespace: "alertmanager",
 			Name:      "notifications_failed_total",
 			Help:      "The total number of failed notifications.",
 		}, []string{"integration"}),
-		notificationLatencySeconds: prometheus.NewHistogramVec(prometheus.HistogramOpts{
+		notificationLatencySeconds: promauto.With(r).NewHistogramVec(prometheus.HistogramOpts{
 			Namespace: "alertmanager",
 			Name:      "notification_latency_seconds",
 			Help:      "The latency of notifications in seconds.",
@@ -376,7 +352,6 @@ func newNotifyMetrics(r prometheus.Registerer) *notifyMetrics {
 		m.numFailedNotifications.WithLabelValues(integration)
 		m.notificationLatencySeconds.WithLabelValues(integration)
 	}
-	r.MustRegister(m.numNotifications, m.numFailedNotifications, m.notificationLatencySeconds)
 	return m
 }
 
@@ -385,15 +360,12 @@ type markerMetrics struct {
 }
 
 func newMarkerMetrics(r prometheus.Registerer) *markerMetrics {
-	m := &markerMetrics{
-		alerts: prometheus.NewGaugeVec(prometheus.GaugeOpts{
+	return &markerMetrics{
+		alerts: promauto.With(r).NewGaugeVec(prometheus.GaugeOpts{
 			Name: "alertmanager_alerts",
 			Help: "How many alerts by state.",
 		}, []string{"state"}),
 	}
-
-	r.MustRegister(m.alerts)
-	return m
 }
 
 // Copied from github.com/alertmanager/api/metrics/metrics.go
@@ -404,19 +376,17 @@ type apiMetrics struct {
 }
 
 func newAPIMetrics(version string, r prometheus.Registerer) *apiMetrics {
-	numReceivedAlerts := prometheus.NewCounterVec(prometheus.CounterOpts{
+	numReceivedAlerts := promauto.With(r).NewCounterVec(prometheus.CounterOpts{
 		Name:        "alertmanager_alerts_received_total",
 		Help:        "The total number of received alerts.",
 		ConstLabels: prometheus.Labels{"version": version},
 	}, []string{"status"})
-	numInvalidAlerts := prometheus.NewCounter(prometheus.CounterOpts{
+	numInvalidAlerts := promauto.With(r).NewCounter(prometheus.CounterOpts{
 		Name:        "alertmanager_alerts_invalid_total",
 		Help:        "The total number of received alerts that were invalid.",
 		ConstLabels: prometheus.Labels{"version": version},
 	})
-	if r != nil {
-		r.MustRegister(numReceivedAlerts, numInvalidAlerts)
-	}
+
 	return &apiMetrics{
 		firing:   numReceivedAlerts.WithLabelValues("firing"),
 		resolved: numReceivedAlerts.WithLabelValues("resolved"),
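
For context on the pattern applied throughout this diff: promauto.With(r) returns a factory whose constructors register each collector on r as a side effect of creation (panicking on a registration error, much like MustRegister), and a nil Registerer makes the factory create collectors without registering them. That behaviour is what lets the explicit "if r != nil { r.MustRegister(...) }" blocks be dropped. Below is a minimal, self-contained sketch of the idea; the newDemoCounter helper and the demo_metric_total name are hypothetical and not part of the change.

    package main

    import (
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // newDemoCounter mirrors the constructor pattern in this change: the counter
    // is registered on r as a side effect of construction, and a nil Registerer
    // means "create but do not register", so callers need no nil check and no
    // separate MustRegister call.
    func newDemoCounter(r prometheus.Registerer) prometheus.Counter {
        return promauto.With(r).NewCounter(prometheus.CounterOpts{
            Name: "demo_metric_total", // hypothetical metric name, not taken from the diff
            Help: "Example counter registered via promauto.With.",
        })
    }

    func main() {
        reg := prometheus.NewRegistry()

        registered := newDemoCounter(reg) // registered on reg at creation time
        registered.Inc()

        unregistered := newDemoCounter(nil) // nil Registerer: usable, but never exported
        unregistered.Inc()
    }

The same factory also provides NewGauge, NewSummary, NewHistogram, and the Vec variants used in the diff above.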