-
Notifications
You must be signed in to change notification settings - Fork 25
/
collector.go
104 lines (91 loc) · 3.67 KB
/
collector.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
package custom
import (
"context"
"time"
"github.com/content-services/content-sources-backend/pkg/config"
"github.com/content-services/content-sources-backend/pkg/dao"
"github.com/content-services/content-sources-backend/pkg/instrumentation"
"github.com/content-services/content-sources-backend/pkg/pulp_client"
uuid2 "github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog/log"
"gorm.io/gorm"
)
const tickerDelay = 30 // collection interval in seconds; ideally kept in sync with the metrics scraper frequency
// Collector periodically gathers application metrics (repository counts,
// introspection stats, task-queue stats, pulp connectivity) and publishes
// them to the prometheus gauges held in metrics.
type Collector struct {
// context carries the metrics logger, a request ID, and cancellation for the Run loop.
context context.Context
// metrics is the set of prometheus gauges this collector updates.
metrics *instrumentation.Metrics
// dao provides the database queries backing the collected counts.
dao dao.MetricsDao
// pulpClient is used only for the pulp connectivity check in iterate.
pulpClient pulp_client.PulpGlobalClient
}
// NewCollector builds a Collector wired to the given metrics registry,
// database, and pulp client. It returns nil when ctx, metrics, or db is nil.
// The collector's context is tagged with the configured metrics log level and
// a fresh request ID, and the cert-expiry gauge is primed immediately so it
// holds an accurate value before the first tick fires.
func NewCollector(ctx context.Context, metrics *instrumentation.Metrics, db *gorm.DB, pulp pulp_client.PulpGlobalClient) *Collector {
	if ctx == nil || metrics == nil || db == nil {
		return nil
	}
	// Allow overriding metrics logging
	ctx = log.Logger.Level(config.MetricsLevel()).WithContext(ctx)
	ctx = context.WithValue(ctx, config.ContextRequestIDKey{}, uuid2.NewString())
	c := &Collector{
		context:    ctx,
		metrics:    metrics,
		dao:        dao.GetMetricsDao(db),
		pulpClient: pulp,
	}
	c.iterateExpiryTime() // iterate once to get accurate values
	return c
}
// iterateExpiryTime refreshes the RHCertExpiryDays gauge with the number of
// days until the CDN certificate expires. On error it logs and leaves the
// gauge at its previous value.
func (c *Collector) iterateExpiryTime() {
	days, err := config.CDNCertDaysTillExpiration()
	if err != nil {
		log.Ctx(c.context).Error().Err(err).Msgf("Could not calculate cdn cert expiration")
		return
	}
	c.metrics.RHCertExpiryDays.Set(float64(days))
}
// iterate performs a single metrics collection pass: it refreshes the
// cert-expiry, repository, organization, 36-hour introspection, task-queue,
// and pulp-connectivity gauges from the database and the pulp API.
func (c *Collector) iterate() {
	ctx := c.context
	c.iterateExpiryTime()
	c.metrics.RepositoriesTotal.Set(float64(c.dao.RepositoriesCount(ctx)))
	// Set once: this gauge was previously written twice back-to-back, which
	// issued a redundant RepositoryConfigsCount database query every tick.
	c.metrics.RepositoryConfigsTotal.Set(float64(c.dao.RepositoryConfigsCount(ctx)))
	c.metrics.OrgTotal.Set(float64(c.dao.OrganizationTotal(ctx)))
	// Introspection outcomes over the last 36 hours, split public vs custom.
	public := c.dao.RepositoriesIntrospectionCount(ctx, 36, true)
	c.metrics.PublicRepositories36HourIntrospectionTotal.With(prometheus.Labels{"status": "introspected"}).Set(float64(public.Introspected))
	c.metrics.PublicRepositories36HourIntrospectionTotal.With(prometheus.Labels{"status": "missed"}).Set(float64(public.Missed))
	custom := c.dao.RepositoriesIntrospectionCount(ctx, 36, false)
	c.metrics.CustomRepositories36HourIntrospectionTotal.With(prometheus.Labels{"status": "introspected"}).Set(float64(custom.Introspected))
	c.metrics.CustomRepositories36HourIntrospectionTotal.With(prometheus.Labels{"status": "missed"}).Set(float64(custom.Missed))
	c.metrics.PublicRepositoriesWithFailedIntrospectionTotal.Set(float64(c.dao.PublicRepositoriesFailedIntrospectionCount(ctx)))
	// Task queue health: average wait, pending count, and oldest queued task.
	latency := c.dao.PendingTasksAverageLatency(ctx)
	c.metrics.TaskStats.With(prometheus.Labels{"label": instrumentation.TaskStatsLabelAverageWait}).Set(latency)
	pendingCount := c.dao.PendingTasksCount(ctx)
	c.metrics.TaskStats.With(prometheus.Labels{"label": instrumentation.TaskStatsLabelPendingCount}).Set(float64(pendingCount))
	oldestQueuedSecs := c.dao.PendingTasksOldestTask(ctx)
	c.metrics.TaskStats.With(prometheus.Labels{"label": instrumentation.TaskStatsLabelOldestWait}).Set(oldestQueuedSecs)
	// Pulp connectivity: 1 when the default domain lookup succeeds, else 0.
	_, err := c.pulpClient.LookupDomain(ctx, pulp_client.DefaultDomain)
	if err != nil {
		c.metrics.PulpConnectivity.Set(0)
	} else {
		c.metrics.PulpConnectivity.Set(1)
	}
}
// Run drives the collection loop, performing one metrics pass every
// tickerDelay seconds until the collector's context is canceled. It blocks,
// so callers should invoke it from its own goroutine.
func (c *Collector) Run() {
	log.Info().Msg("Starting metrics collector go routine")
	ticker := time.NewTicker(tickerDelay * time.Second)
	// defer guarantees the ticker is released on any return path.
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			c.iterate()
		case <-c.context.Done():
			// Msg (not Msgf): the message has no format verbs; go vet's
			// printf check flags Msgf with zero arguments.
			log.Info().Msg("Stopping metrics collector go routine")
			return
		}
	}
}