metrics.go (forked from moby/moby)
package daemon

import (
	"path/filepath"
	"sync"

	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/plugingetter"
	metrics "github.com/docker/go-metrics"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/sirupsen/logrus"
)
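
// metricsPluginType is the plugin capability used to look up metrics-collector
// plugins and to prefix their RPC method names.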
const metricsPluginType = "MetricsCollector"

var (
	containerActions          metrics.LabeledTimer
	imageActions              metrics.LabeledTimer
	networkActions            metrics.LabeledTimer
	engineInfo                metrics.LabeledGauge
	engineCpus                metrics.Gauge
	engineMemory              metrics.Gauge
	healthChecksCounter       metrics.Counter
	healthChecksFailedCounter metrics.Counter

	stateCtr *stateCounter
)
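
// init builds the engine/daemon metrics namespace, pre-populates the container
// action timers so the series exist before the first action is processed, and
// registers the namespace (including the container-state collector) with the
// go-metrics registry.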
func init() {
	ns := metrics.NewNamespace("engine", "daemon", nil)
	containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
	for _, a := range []string{
		"start",
		"changes",
		"commit",
		"create",
		"delete",
	} {
		containerActions.WithValues(a).Update(0)
	}

	networkActions = ns.NewLabeledTimer("network_actions", "The number of seconds it takes to process each network action", "action")
	engineInfo = ns.NewLabeledGauge("engine", "The information related to the engine and the OS it is running on", metrics.Unit("info"),
		"version",
		"commit",
		"architecture",
		"graphdriver",
		"kernel", "os",
		"os_type",
		"daemon_id", // ID is a randomly generated unique identifier (e.g. UUID4)
	)
	engineCpus = ns.NewGauge("engine_cpus", "The number of cpus that the host system of the engine has", metrics.Unit("cpus"))
	engineMemory = ns.NewGauge("engine_memory", "The number of bytes of memory that the host system of the engine has", metrics.Bytes)
	healthChecksCounter = ns.NewCounter("health_checks", "The total number of health checks")
	healthChecksFailedCounter = ns.NewCounter("health_checks_failed", "The total number of failed health checks")
	imageActions = ns.NewLabeledTimer("image_actions", "The number of seconds it takes to process each image action", "action")

	stateCtr = newStateCounter(ns.NewDesc("container_states", "The count of containers in various states", metrics.Unit("containers"), "state"))
	ns.Add(stateCtr)

	metrics.Register(ns)
}
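
// stateCounter tracks the state ("running", "paused", or "stopped") of every
// known container, keyed by container ID, and exposes the per-state totals as
// a Prometheus collector.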
type stateCounter struct {
	mu     sync.Mutex
	states map[string]string
	desc   *prometheus.Desc
}
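
// newStateCounter returns a stateCounter that reports its counts under the
// given metric description.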
func newStateCounter(desc *prometheus.Desc) *stateCounter {
	return &stateCounter{
		states: make(map[string]string),
		desc:   desc,
	}
}
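
// get returns the current number of containers in the running, paused, and
// stopped states.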
func (ctr *stateCounter) get() (running int, paused int, stopped int) {
	ctr.mu.Lock()
	defer ctr.mu.Unlock()

	states := map[string]int{
		"running": 0,
		"paused":  0,
		"stopped": 0,
	}
	for _, state := range ctr.states {
		states[state]++
	}
	return states["running"], states["paused"], states["stopped"]
}
func (ctr *stateCounter) set(id, label string) {
	ctr.mu.Lock()
	ctr.states[id] = label
	ctr.mu.Unlock()
}

func (ctr *stateCounter) del(id string) {
	ctr.mu.Lock()
	delete(ctr.states, id)
	ctr.mu.Unlock()
}
func (ctr *stateCounter) Describe(ch chan<- *prometheus.Desc) {
	ch <- ctr.desc
}

func (ctr *stateCounter) Collect(ch chan<- prometheus.Metric) {
	running, paused, stopped := ctr.get()
	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(running), "running")
	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(paused), "paused")
	ch <- prometheus.MustNewConstMetric(ctr.desc, prometheus.GaugeValue, float64(stopped), "stopped")
}
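
// cleanupMetricsPlugins asks every managed metrics-collector plugin to stop
// collecting (in parallel), waits for all of them to finish, and then closes
// the daemon's metrics plugin listener if one is open.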
func (d *Daemon) cleanupMetricsPlugins() {
	ls := d.PluginStore.GetAllManagedPluginsByCap(metricsPluginType)
	var wg sync.WaitGroup
	wg.Add(len(ls))

	for _, p := range ls {
		// Pass p as an argument so each goroutine stops its own plugin instead
		// of whichever plugin the shared loop variable points to last.
		go func(p plugingetter.CompatPlugin) {
			defer wg.Done()
			pluginStopMetricsCollection(p)
		}(p)
	}
	wg.Wait()

	if d.metricsPluginListener != nil {
		d.metricsPluginListener.Close()
	}
}
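
// metricsPlugin wraps a CompatPlugin with helpers that build the path of the
// metrics socket (run/docker/metrics.sock) under the plugin's base path.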
type metricsPlugin struct {
	plugingetter.CompatPlugin
}

func (p metricsPlugin) sock() string {
	return "metrics.sock"
}

func (p metricsPlugin) sockBase() string {
	return filepath.Join(p.BasePath(), "run", "docker")
}
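
// pluginStartMetricsCollection calls the plugin's MetricsCollector.StartMetrics
// endpoint and surfaces any transport error or plugin-reported error.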
func pluginStartMetricsCollection(p plugingetter.CompatPlugin) error {
	type metricsPluginResponse struct {
		Err string
	}
	var res metricsPluginResponse
	if err := p.Client().Call(metricsPluginType+".StartMetrics", nil, &res); err != nil {
		return errors.Wrap(err, "could not start metrics plugin")
	}
	if res.Err != "" {
		return errors.New(res.Err)
	}
	return nil
}
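
// pluginStopMetricsCollection calls the plugin's MetricsCollector.StopMetrics
// endpoint and then unmounts the metrics socket from the plugin's base path.
// Unmount errors are logged only when the socket is still mounted, since it
// may already have been cleaned up.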
func pluginStopMetricsCollection(p plugingetter.CompatPlugin) {
	if err := p.Client().Call(metricsPluginType+".StopMetrics", nil, nil); err != nil {
		logrus.WithError(err).WithField("name", p.Name()).Error("error stopping metrics collector")
	}

	mp := metricsPlugin{p}
	sockPath := filepath.Join(mp.sockBase(), mp.sock())
	if err := mount.Unmount(sockPath); err != nil {
		if mounted, _ := mount.Mounted(sockPath); mounted {
			logrus.WithError(err).WithField("name", p.Name()).WithField("socket", sockPath).Error("error unmounting metrics socket for plugin")
		}
	}
}