forked from cloudflare/cloudflared
/
metrics.go
126 lines (112 loc) · 3.21 KB
/
metrics.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
package metrics
import (
"context"
"fmt"
"net"
"net/http"
_ "net/http/pprof"
"runtime"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/rs/zerolog"
"golang.org/x/net/trace"
)
const (
// startupTime is an artificial delay before watching for shutdown, so that
// server.Serve in ServeMetrics has time to start before Shutdown can be
// called (Serve hangs if Shutdown wins the race — see comment there).
startupTime = time.Millisecond * 500
// defaultShutdownTimeout bounds graceful shutdown when
// Config.ShutdownTimeout is left at its zero value.
defaultShutdownTimeout = time.Second * 15
)
// Config carries the optional handlers and settings used to build and run
// the metrics HTTP server (see newMetricsHandler and ServeMetrics).
type Config struct {
// ReadyServer, when non-nil, is mounted at /ready.
ReadyServer *ReadyServer
// QuickTunnelHostname is echoed as JSON at /quicktunnel.
QuickTunnelHostname string
// Orchestrator, when non-nil, serves its config JSON at /config.
Orchestrator orchestrator
// ShutdownTimeout bounds graceful shutdown; zero means
// defaultShutdownTimeout.
ShutdownTimeout time.Duration
}
// orchestrator is the narrow interface the /config endpoint needs: a
// snapshot of the current versioned configuration as JSON.
type orchestrator interface {
GetVersionedConfigJSON() ([]byte, error)
}
// newMetricsHandler builds the mux served on the metrics port. It always
// exposes /debug/ (pprof and x/net/trace handlers registered on
// http.DefaultServeMux), /metrics (Prometheus) and /healthcheck; /ready,
// /quicktunnel and /config are registered only when the corresponding
// Config field is set.
func newMetricsHandler(
	config Config,
	log *zerolog.Logger,
) *http.ServeMux {
	router := http.NewServeMux()
	router.Handle("/debug/", http.DefaultServeMux)
	router.Handle("/metrics", promhttp.Handler())
	router.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) {
		_, _ = fmt.Fprintf(w, "OK\n")
	})
	if config.ReadyServer != nil {
		router.Handle("/ready", config.ReadyServer)
	}
	router.HandleFunc("/quicktunnel", func(w http.ResponseWriter, r *http.Request) {
		_, _ = fmt.Fprintf(w, `{"hostname":"%s"}`, config.QuickTunnelHostname)
	})
	if config.Orchestrator != nil {
		router.HandleFunc("/config", func(w http.ResponseWriter, r *http.Request) {
			json, err := config.Orchestrator.GetVersionedConfigJSON()
			if err != nil {
				// Named status constant instead of the magic number 500.
				w.WriteHeader(http.StatusInternalServerError)
				_, _ = fmt.Fprintf(w, "ERR: %v", err)
				log.Err(err).Msg("Failed to serve config")
				return
			}
			_, _ = w.Write(json)
		})
	}
	return router
}
// ServeMetrics runs the metrics/administration HTTP server on l until ctx
// is cancelled, then shuts it down gracefully within Config.ShutdownTimeout
// (defaulting to defaultShutdownTimeout). It returns nil on a clean
// shutdown, or the error from server.Serve otherwise.
func ServeMetrics(
	l net.Listener,
	ctx context.Context,
	config Config,
	log *zerolog.Logger,
) (err error) {
	var wg sync.WaitGroup
	// Metrics port is privileged, so no need for further access control
	trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
	// TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
	// profile CPU usage depends on WriteTimeout
	h := newMetricsHandler(config, log)
	server := &http.Server{
		ReadTimeout:  10 * time.Second,
		WriteTimeout: 10 * time.Second,
		Handler:      h,
	}
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Writing the named result here is safe: it is only read after
		// wg.Wait() below, which establishes a happens-before edge.
		err = server.Serve(l)
	}()
	log.Info().Msgf("Starting metrics server on %s", fmt.Sprintf("%v/metrics", l.Addr()))
	// server.Serve will hang if server.Shutdown is called before the server is
	// fully started up. So add artificial delay.
	time.Sleep(startupTime)
	<-ctx.Done()
	shutdownTimeout := config.ShutdownTimeout
	if shutdownTimeout == 0 {
		shutdownTimeout = defaultShutdownTimeout
	}
	// A fresh context is required: the incoming ctx is already cancelled.
	// Named shutdownCtx instead of shadowing ctx for clarity.
	shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
	_ = server.Shutdown(shutdownCtx)
	cancel()
	wg.Wait()
	// errors.Is keeps working even if the sentinel is ever wrapped.
	if errors.Is(err, http.ErrServerClosed) {
		log.Info().Msg("Metrics server stopped")
		return nil
	}
	log.Err(err).Msg("Metrics server failed")
	return err
}
// RegisterBuildInfo registers a constant build_info gauge (value 1) whose
// labels carry the running Go version plus the caller-supplied build
// metadata. MustRegister panics on duplicate registration, so call this at
// most once per process.
func RegisterBuildInfo(buildType, buildTime, version string) {
buildInfo := prometheus.NewGaugeVec(
prometheus.GaugeOpts{
// Don't namespace build_info, since we want it to be consistent across all Cloudflare services
Name: "build_info",
Help: "Build and version information",
},
// NOTE(review): label order must match WithLabelValues below. As written,
// the "revision" label is populated with buildTime — presumably a
// deliberate upstream quirk, but confirm before renaming either side;
// changing it would alter exported telemetry.
[]string{"goversion", "type", "revision", "version"},
)
prometheus.MustRegister(buildInfo)
buildInfo.WithLabelValues(runtime.Version(), buildType, buildTime, version).Set(1)
}