/
cpu.go
79 lines (64 loc) · 1.77 KB
/
cpu.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
package metrics
import (
"fmt"
"os"
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/procfs"
)
const (
	// MetricCPUUTimeSeconds is the name of the gauge reporting user-mode CPU time.
	MetricCPUUTimeSeconds = "oasis_node_cpu_utime_seconds"
	// MetricCPUSTimeSeconds is the name of the gauge reporting kernel-mode CPU time.
	MetricCPUSTimeSeconds = "oasis_node_cpu_stime_seconds"

	// ClockTicks is the assumed kernel clock tick rate (USER_HZ) used to convert
	// the tick counts in /proc/<PID>/stat to seconds.
	//
	// NOTE(review): this hard-codes the value of `getconf CLK_TCK`, which is 100
	// on virtually all Linux configurations but is technically system-dependent.
	ClockTicks = 100
)
var (
	// utimeGauge reports user-mode CPU time (seconds) of this process.
	utimeGauge = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: MetricCPUUTimeSeconds,
			Help: "CPU user time spent by worker as reported by /proc/<PID>/stat (seconds).",
		},
	)

	// stimeGauge reports kernel-mode CPU time (seconds) of this process.
	stimeGauge = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Name: MetricCPUSTimeSeconds,
			Help: "CPU system time spent by worker as reported by /proc/<PID>/stat (seconds).",
		},
	)

	// cpuCollectors groups all CPU gauges for one-shot registration.
	cpuCollectors = []prometheus.Collector{utimeGauge, stimeGauge}

	// cpuServiceOnce guards registration so the process-wide gauges are
	// registered with Prometheus at most once.
	cpuServiceOnce sync.Once
)
// cpuCollector reads CPU time usage of a single process from procfs and
// publishes it via the package-level Prometheus gauges.
type cpuCollector struct {
	// TODO: Should we monitor memory of children PIDs as well?

	// pid is the process whose /proc/<pid>/stat is sampled on each Update.
	pid int
}
// Name returns the identifier of this resource collector.
func (c *cpuCollector) Name() string {
	const collectorName = "cpu"
	return collectorName
}
// Update samples /proc/<pid>/stat for the collector's process and refreshes
// the user-time and system-time gauges.
//
// Tick counts reported by the kernel are converted to seconds using the
// assumed USER_HZ value in ClockTicks.
func (c *cpuCollector) Update() error {
	// Obtain process CPU info.
	proc, err := procfs.NewProc(c.pid)
	if err != nil {
		return fmt.Errorf("CPU metric: failed to obtain proc object for PID %d: %w", c.pid, err)
	}
	procStat, err := proc.Stat()
	if err != nil {
		// Keep the PID labeled, consistent with the error above.
		return fmt.Errorf("CPU metric: failed to obtain procStat object for PID %d: %w", c.pid, err)
	}

	// UTime/STime are cumulative clock ticks; divide by ticks-per-second to
	// expose the metrics in seconds as advertised by the gauge help strings.
	utimeGauge.Set(float64(procStat.UTime) / float64(ClockTicks))
	stimeGauge.Set(float64(procStat.STime) / float64(ClockTicks))

	return nil
}
// NewCPUCollector constructs a new CPU usage collector.
//
// This service will regularly read CPU spent time info from process Stat file.
func NewCPUCollector() ResourceCollector {
	// CPU metrics are singletons per process. Ensure to register them only once.
	cpuServiceOnce.Do(func() {
		prometheus.MustRegister(cpuCollectors...)
	})

	// The collector always observes the current process.
	return &cpuCollector{pid: os.Getpid()}
}