-
Notifications
You must be signed in to change notification settings - Fork 3.2k
/
server.go
110 lines (93 loc) · 2.93 KB
/
server.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
package metrics
import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	log "github.com/sirupsen/logrus"
	runtimeutil "k8s.io/apimachinery/pkg/util/runtime"
)
// RunServer starts a metrics server
func (m *Metrics) RunServer(ctx context.Context) {
	defer runtimeutil.HandleCrash(runtimeutil.PanicHandlers...)

	// Nothing to do unless metrics are switched on.
	if !m.metricsConfig.Enabled {
		return
	}

	registry := prometheus.NewRegistry()
	registry.MustRegister(m)

	switch {
	case m.metricsConfig.SameServerAs(m.telemetryConfig):
		// Metrics and telemetry share one endpoint: serve the Go runtime
		// collector from the same registry and the same server instance.
		registry.MustRegister(prometheus.NewGoCollector())
	case m.telemetryConfig.Enabled:
		// Telemetry is enabled on a distinct endpoint: give it its own
		// registry and its own server instance.
		telemetry := prometheus.NewRegistry()
		telemetry.MustRegister(prometheus.NewGoCollector())
		go runServer(m.telemetryConfig, telemetry, ctx)
	}

	// Run the metrics server and the custom-metric garbage collector.
	go runServer(m.metricsConfig, registry, ctx)
	go m.garbageCollector(ctx)
}
// runServer serves the given Prometheus registry over HTTP on config.Port at
// config.Path, then shuts the server down gracefully (1 second timeout) once
// ctx is cancelled.
func runServer(config ServerConfig, registry *prometheus.Registry, ctx context.Context) {
	var handlerOpts promhttp.HandlerOpts
	if config.IgnoreErrors {
		// Serve whatever metrics could be gathered rather than failing the scrape.
		handlerOpts.ErrorHandling = promhttp.ContinueOnError
	}

	mux := http.NewServeMux()
	mux.Handle(config.Path, promhttp.HandlerFor(registry, handlerOpts))
	srv := &http.Server{Addr: fmt.Sprintf(":%v", config.Port), Handler: mux}

	go func() {
		log.Infof("Starting prometheus metrics server at localhost:%v%s", config.Port, config.Path)
		// ListenAndServe returns http.ErrServerClosed after the graceful
		// Shutdown below; that is the expected stop path, not a failure, so
		// it must not panic (the original panicked on every clean shutdown).
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			panic(err)
		}
	}()

	// Waiting for stop signal
	<-ctx.Done()

	// Shutdown the server gracefully with a 1 second timeout
	shutdownCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()
	if err := srv.Shutdown(shutdownCtx); err != nil {
		// Include the reason the graceful shutdown failed (the original dropped it).
		log.Errorf("Unable to shutdown metrics server at localhost:%v%s: %v", config.Port, config.Path, err)
	}
}
// Describe implements the prometheus.Collector interface: it sends the
// descriptor of every metric this struct exposes on ch.
func (m *Metrics) Describe(ch chan<- *prometheus.Desc) {
	for _, metric := range m.allMetrics() {
		ch <- metric.Desc()
	}
	// Delegate to the remaining collectors in a single pass.
	for _, c := range []prometheus.Collector{m.logMetric, K8sRequestTotalMetric, PodMissingMetric, WorkflowConditionMetric} {
		c.Describe(ch)
	}
}
// Collect implements the prometheus.Collector interface: it streams the
// current value of every metric this struct exposes on ch.
func (m *Metrics) Collect(ch chan<- prometheus.Metric) {
	for _, metric := range m.allMetrics() {
		ch <- metric
	}
	// Delegate to the remaining collectors in a single pass.
	for _, c := range []prometheus.Collector{m.logMetric, K8sRequestTotalMetric, PodMissingMetric, WorkflowConditionMetric} {
		c.Collect(ch)
	}
}
// garbageCollector periodically deletes custom metrics whose last update is
// older than the configured TTL. It runs until ctx is cancelled.
func (m *Metrics) garbageCollector(ctx context.Context) {
	// time.NewTicker panics on a non-positive interval; the original guard
	// only covered TTL == 0, so a negative TTL would panic. Treat any
	// non-positive TTL as "garbage collection disabled".
	if m.metricsConfig.TTL <= 0 {
		return
	}

	ticker := time.NewTicker(m.metricsConfig.TTL)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			// Deleting entries while ranging over a map is well-defined in Go.
			// NOTE(review): no lock is taken around customMetrics here — if any
			// other goroutine mutates the map concurrently this is a data race;
			// confirm the callers' synchronization.
			for key, metric := range m.customMetrics {
				if time.Since(metric.lastUpdated) > m.metricsConfig.TTL {
					delete(m.customMetrics, key)
				}
			}
		}
	}
}