forked from vmware-archive/atc
-
Notifications
You must be signed in to change notification settings - Fork 1
/
newrelic.go
183 lines (155 loc) · 5.44 KB
/
newrelic.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
package emitter
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"code.cloudfoundry.org/lager"
"github.com/concourse/atc/metric"
)
type (
	// stats holds the most recently observed created/deleted counter values
	// for one resource kind (containers or volumes), so Emit can batch them
	// into a single consolidated event. Values are interface{} because they
	// are taken verbatim from metric.Event.Value.
	stats struct {
		created interface{}
		deleted interface{}
	}

	// NewRelicEmitter sends metric events to the New Relic Insights insert
	// API over HTTP.
	NewRelicEmitter struct {
		client *http.Client
		// url is the account-specific Insights collector endpoint.
		url    string
		apikey string
		// prefix is prepended to every emitted eventType.
		prefix string
		// containers and volumes accumulate periodic counters between Emit
		// calls so related metrics can be reported as one event.
		containers *stats
		volumes    *stats
	}

	// NewRelicConfig carries the command-line flags that enable and
	// configure this emitter.
	NewRelicConfig struct {
		AccountID     string `long:"newrelic-account-id" description:"New Relic Account ID"`
		APIKey        string `long:"newrelic-api-key" description:"New Relic Insights API Key"`
		ServicePrefix string `long:"newrelic-service-prefix" default:"" description:"An optional prefix for emitted New Relic events"`
	}

	// singlePayload is one New Relic event; fullPayload is the batch POSTed
	// to the collector in a single request.
	singlePayload map[string]interface{}
	fullPayload   []singlePayload
)
// init registers this emitter's config with the metric package so the
// New Relic emitter can be selected at startup via its flags.
func init() {
	metric.RegisterEmitter(&NewRelicConfig{})
}
// Description returns the display name used to identify this emitter.
func (config *NewRelicConfig) Description() string {
	return "NewRelic"
}
// IsConfigured reports whether both settings required to reach New Relic —
// the account ID and the Insights API key — have been supplied.
func (config *NewRelicConfig) IsConfigured() bool {
	if config.AccountID == "" {
		return false
	}
	return config.APIKey != ""
}
// NewEmitter builds a NewRelicEmitter pointed at the Insights collector for
// the configured account. It always returns a nil error; construction cannot
// fail.
func (config *NewRelicConfig) NewEmitter() (metric.Emitter, error) {
	// Leave Transport nil so the client uses http.DefaultTransport. The
	// original's zero-value &http.Transport{} silently discarded the default
	// transport's proxy-from-environment support and dial/idle-connection
	// tuning. The overall Timeout bounds each request end-to-end.
	client := &http.Client{
		Timeout: time.Minute,
	}

	return &NewRelicEmitter{
		client:     client,
		url:        fmt.Sprintf("https://insights-collector.newrelic.com/v1/accounts/%s/events", config.AccountID),
		apikey:     config.APIKey,
		prefix:     config.ServicePrefix,
		containers: new(stats),
		volumes:    new(stats),
	}, nil
}
// simplePayload converts one metric.Event into a single New Relic event map.
// When nameOverride is non-empty it is used as the event name; otherwise the
// metric's own name is used with spaces turned into underscores. Each event
// attribute is copied in under a leading-underscore key so it cannot collide
// with the fixed fields.
func (emitter *NewRelicEmitter) simplePayload(logger lager.Logger, event metric.Event, nameOverride string) singlePayload {
	eventName := nameOverride
	if eventName == "" {
		eventName = strings.Replace(event.Name, " ", "_", -1)
	}

	payload := singlePayload{
		"eventType": emitter.prefix + eventName,
		"value":     event.Value,
		"state":     string(event.State),
		"host":      event.Host,
		"timestamp": event.Time.Unix(),
	}

	for key, value := range event.Attributes {
		payload["_"+key] = value
	}

	return payload
}
// emitPayload serializes the batched events and POSTs them to the New Relic
// Insights endpoint. Failures are logged and otherwise swallowed: metric
// emission must never disrupt the caller.
func (emitter *NewRelicEmitter) emitPayload(logger lager.Logger, payload fullPayload) {
	payloadJSON, err := json.Marshal(payload)
	if err != nil {
		logger.Error("failed-to-serialize-payload", err)
		return
	}

	req, err := http.NewRequest("POST", emitter.url, bytes.NewBuffer(payloadJSON))
	if err != nil {
		logger.Error("failed-to-construct-request", err)
		// BUG FIX: the original fell through here and dereferenced the nil
		// request below, which would panic.
		return
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("X-Insert-Key", emitter.apikey)

	resp, err := emitter.client.Do(req)
	if err != nil {
		logger.Error("failed-to-send-request", err)
		return
	}
	defer resp.Body.Close()

	// Drain the body so the transport can reuse the underlying connection.
	// Best-effort: a drain failure is not worth surfacing.
	_, _ = io.Copy(io.Discard, resp.Body)

	// The original ignored the response entirely; surface rejected batches.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		logger.Error("unexpected-response-status", fmt.Errorf("status: %s", resp.Status))
	}
}
// Emit translates one Concourse metric event into zero or more New Relic
// events and sends them via emitPayload. Most metrics map 1:1 onto an event;
// container/volume created/deleted counters are instead stashed on the
// emitter and folded into the matching "failed containers"/"failed volumes"
// event, which triggers the consolidated send.
//
// NOTE(review): emitter.containers/emitter.volumes are mutated without any
// locking — this assumes Emit is never called concurrently; confirm against
// the metric package's dispatch model.
func (emitter *NewRelicEmitter) Emit(logger lager.Logger, event metric.Event) {
	payload := make(fullPayload, 0)

	switch event.Name {

	// These are the simple ones that only need a small name transformation
	case "build started",
		"build finished",
		"worker containers",
		"worker volumes",
		"http response time",
		"database queries",
		"database connections":
		payload = append(payload, emitter.simplePayload(logger, event, ""))

	// These are periodic metrics that are consolidated and only emitted once
	// per cycle (the emit trigger is chosen because it's currently last in the
	// periodic list, so we should have a coherent view). We do this because
	// new relic has a hard limit on the total number of metrics in a 24h
	// period, so batching similar data where possible makes sense.
	case "containers deleted":
		emitter.containers.deleted = event.Value
	case "containers created":
		emitter.containers.created = event.Value
	case "failed containers":
		// Fold the stashed created/deleted counts into this event, renaming
		// "value" to "failed" so the three counts sit side by side.
		newPayload := emitter.simplePayload(logger, event, "containers")
		newPayload["failed"] = newPayload["value"]
		newPayload["created"] = emitter.containers.created
		newPayload["deleted"] = emitter.containers.deleted
		delete(newPayload, "value")
		payload = append(payload, newPayload)
	case "volumes deleted":
		emitter.volumes.deleted = event.Value
	case "volumes created":
		emitter.volumes.created = event.Value
	case "failed volumes":
		// Same consolidation as "failed containers", for volumes.
		newPayload := emitter.simplePayload(logger, event, "volumes")
		newPayload["failed"] = newPayload["value"]
		newPayload["created"] = emitter.volumes.created
		newPayload["deleted"] = emitter.volumes.deleted
		delete(newPayload, "value")
		payload = append(payload, newPayload)

	// And a couple that need a small rename (new relic doesn't like some chars)
	case "scheduling: full duration (ms)":
		payload = append(payload, emitter.simplePayload(logger, event, "scheduling_full_duration_ms"))
	case "scheduling: loading versions duration (ms)":
		payload = append(payload, emitter.simplePayload(logger, event, "scheduling_load_duration_ms"))
	case "scheduling: job duration (ms)":
		payload = append(payload, emitter.simplePayload(logger, event, "scheduling_job_duration_ms"))
	default:
		// Ignore the rest
	}

	// But also log any metric that's not EventStateOK, even if we're not
	// otherwise recording it. (This won't be easily graphable, that's okay,
	// this is more for monitoring synthetics)
	if event.State != metric.EventStateOK {
		singlePayload := emitter.simplePayload(logger, event, "alert")
		// We don't have friendly names for all the metrics, and part of the
		// point of this alert is to catch events we should be logging but
		// didn't; therefore, be consistently inconsistent and use the
		// concourse metric names, not our translation layer.
		singlePayload["metric"] = event.Name
		payload = append(payload, singlePayload)
	}

	if len(payload) > 0 {
		emitter.emitPayload(logger, payload)
	}
}