forked from Technofy/cloudwatch_exporter
-
Notifications
You must be signed in to change notification settings - Fork 86
/
scraper.go
146 lines (120 loc) · 3.28 KB
/
scraper.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
package basic
import (
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
"github.com/percona/rds_exporter/config"
)
// Scrape timing parameters. Declared as variables (not constants) so they
// can be tuned by the importing program before scraping starts.
var (
	// Period is the granularity of the requested datapoints.
	Period = 60 * time.Second
	// Delay shifts the query window into the past, presumably to give
	// CloudWatch time to aggregate recent datapoints — TODO confirm intent.
	Delay = 600 * time.Second
	// Range is the length of the query window (StartTime = EndTime - Range).
	Range = 600 * time.Second
)
// Scraper fetches CloudWatch metrics for a single RDS instance and pushes
// them onto a Prometheus metric channel.
type Scraper struct {
	// params
	instance  *config.Instance          // the RDS instance being scraped
	collector *Collector                // owning collector (metrics list, sessions, logger)
	ch        chan<- prometheus.Metric  // destination for converted metrics
	// internal
	svc         *cloudwatch.CloudWatch  // CloudWatch API client for the instance's region
	constLabels prometheus.Labels       // labels attached to every emitted metric
}
// NewScraper builds a Scraper for the given instance, wiring it to the
// collector's AWS session and the output channel. It returns nil when no
// session is available for the instance's region.
func NewScraper(instance *config.Instance, collector *Collector, ch chan<- prometheus.Metric) *Scraper {
	sess, _ := collector.sessions.GetSession(instance.Region, instance.Instance)
	if sess == nil {
		return nil
	}

	// Start from the standard labels, then apply per-instance overrides:
	// an empty value removes the label, a non-empty value sets it.
	labels := prometheus.Labels{
		"region":   instance.Region,
		"instance": instance.Instance,
	}
	for name, value := range instance.Labels {
		if value == "" {
			delete(labels, name)
			continue
		}
		labels[name] = value
	}

	return &Scraper{
		instance:    instance,
		collector:   collector,
		ch:          ch,
		svc:         cloudwatch.New(sess),
		constLabels: labels,
	}
}
// getLatestDatapoint returns the datapoint with the most recent timestamp,
// or nil when the slice is empty.
func getLatestDatapoint(datapoints []*cloudwatch.Datapoint) *cloudwatch.Datapoint {
	var latest *cloudwatch.Datapoint
	for _, dp := range datapoints {
		if latest == nil || latest.Timestamp.Before(*dp.Timestamp) {
			latest = dp
		}
	}
	return latest
}
// Scrape makes the required calls to AWS CloudWatch by using the parameters in the Collector.
// Once converted into Prometheus format, the metrics are pushed on the ch channel.
// Scrape makes the required calls to AWS CloudWatch by using the parameters
// in the Collector. Once converted into Prometheus format, the metrics are
// pushed on the ch channel. It blocks until every per-metric goroutine has
// finished.
func (s *Scraper) Scrape() {
	var wg sync.WaitGroup
	defer wg.Wait()
	for _, m := range s.collector.metrics {
		m := m // shadow for goroutine capture (pre-Go 1.22 loop semantics)
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := s.scrapeMetric(m); err != nil {
				level.Error(s.collector.l).Log("metric", m.cwName, "error", err)
			}
		}()
	}
}
// scrapeMetric fetches the latest CloudWatch datapoint for a single metric
// and publishes it on s.ch as a Prometheus gauge with the scraper's constant
// labels. It returns nil when CloudWatch has no datapoints for the window.
//
// The query window ends Delay in the past (presumably so CloudWatch has
// finished aggregating) and spans Range, with Period granularity.
func (s *Scraper) scrapeMetric(metric Metric) error {
	now := time.Now()
	end := now.Add(-Delay)
	params := &cloudwatch.GetMetricStatisticsInput{
		EndTime:    aws.Time(end),
		StartTime:  aws.Time(end.Add(-Range)),
		Period:     aws.Int64(int64(Period.Seconds())),
		MetricName: aws.String(metric.cwName),
		Namespace:  aws.String("AWS/RDS"),
		Dimensions: []*cloudwatch.Dimension{{
			Name:  aws.String("DBInstanceIdentifier"),
			Value: aws.String(s.instance.Instance),
		}},
		Statistics: aws.StringSlice([]string{"Average"}),
	}

	// Call CloudWatch to gather the datapoints.
	resp, err := s.svc.GetMetricStatistics(params)
	if err != nil {
		return err
	}

	// There's nothing in there, don't publish the metric.
	if len(resp.Datapoints) == 0 {
		return nil
	}

	// Pick the latest datapoint and extract its value.
	dp := getLatestDatapoint(resp.Datapoints)
	v := aws.Float64Value(dp.Average)
	if metric.cwName == "EngineUptime" {
		// Fake EngineUptime -> node_boot_time: boot time = scrape time minus
		// uptime. Reuse the `now` captured at the top so the value stays
		// consistent with the query window instead of sampling the clock a
		// second time.
		v = float64(now.Unix() - int64(v))
	}

	// Send metric.
	s.ch <- prometheus.MustNewConstMetric(
		prometheus.NewDesc(metric.prometheusName, metric.prometheusHelp, nil, s.constLabels),
		prometheus.GaugeValue,
		v,
	)
	return nil
}