-
Notifications
You must be signed in to change notification settings - Fork 567
/
segment.go
103 lines (95 loc) · 3.8 KB
/
segment.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
package metrics
import (
"fmt"
"time"
"github.com/segmentio/analytics-go"
)
// reportingInterval is how often the persistent client flushes queued
// metrics events to Segment (hourly).
const reportingInterval time.Duration = time.Hour

// newPersistentClient returns a Segment client tuned for long-lived, batched
// reporting: queued events are sent once per reportingInterval, or sooner if
// 100 events accumulate.
func newPersistentClient() *analytics.Client {
	client := newSegmentClient()
	client.Interval = reportingInterval
	client.Size = 100
	return client
}
// newSegmentClient constructs a Segment analytics client with default
// settings, authenticated with the project's write key.
func newSegmentClient() *analytics.Client {
	const writeKey = "hhxbyr7x50w3jtgcwcZUyOFrTf4VNMrD"
	return analytics.New(writeKey)
}
// reportClusterMetricsToSegment sends a single "cluster.metrics" event to
// Segment carrying a snapshot of cluster-wide usage data. The cluster ID is
// used as the anonymous ID so events from the same cluster correlate without
// identifying a user.
func reportClusterMetricsToSegment(client *analytics.Client, metrics *Metrics) {
	client.Track(&analytics.Track{ //nolint:errcheck // ignoring error because metrics code is non-critical
		Event:       "cluster.metrics",
		AnonymousId: metrics.ClusterId,
		Properties: map[string]interface{}{
			"PodID":               metrics.PodId,
			"nodes":               metrics.Nodes,
			"version":             metrics.Version,
			"repos":               metrics.Repos,
			"commits":             metrics.Commits,
			"files":               metrics.Files,
			"bytes":               metrics.Bytes,
			"jobs":                metrics.Jobs,
			"pipelines":           metrics.Pipelines,
			"ActivationCode":      metrics.ActivationCode,
			"MaxBranches":         metrics.MaxBranches,
			"PpsSpout":            metrics.PpsSpout,
			"PpsSpoutService":     metrics.PpsSpoutService,
			"CfgEgress":           metrics.CfgEgress,
			"CfgStandby":          metrics.CfgStandby,
			"CfgS3Gateway":        metrics.CfgS3Gateway,
			"CfgServices":         metrics.CfgServices,
			"CfgErrcmd":           metrics.CfgErrcmd,
			"CfgTfjob":            metrics.CfgTfjob,
			"InputGroup":          metrics.InputGroup,
			"InputJoin":           metrics.InputJoin,
			"InputCross":          metrics.InputCross,
			"InputUnion":          metrics.InputUnion,
			"InputCron":           metrics.InputCron,
			// BUG FIX: previously reported metrics.InputGroup under the
			// "InputGit" key (copy-paste error); every property maps to its
			// same-named field.
			"InputGit":            metrics.InputGit,
			"InputPfs":            metrics.InputPfs,
			"InputCommit":         metrics.InputCommit,
			"InputJoinOn":         metrics.InputJoinOn,
			"InputOuterJoin":      metrics.InputOuterJoin,
			"InputLazy":           metrics.InputLazy,
			"InputEmptyFiles":     metrics.InputEmptyFiles,
			"InputS3":             metrics.InputS3,
			"InputTrigger":        metrics.InputTrigger,
			"ResourceCpuReq":      metrics.ResourceCpuReq,
			"ResourceCpuReqMax":   metrics.ResourceCpuReqMax,
			"ResourceMemReq":      metrics.ResourceMemReq,
			"ResourceGpuReq":      metrics.ResourceGpuReq,
			"ResourceGpuReqMax":   metrics.ResourceGpuReqMax,
			"ResourceDiskReq":     metrics.ResourceDiskReq,
			"ResourceCpuLimit":    metrics.ResourceCpuLimit,
			"ResourceCpuLimitMax": metrics.ResourceCpuLimitMax,
			"ResourceMemLimit":    metrics.ResourceMemLimit,
			"ResourceGpuLimit":    metrics.ResourceGpuLimit,
			"ResourceGpuLimitMax": metrics.ResourceGpuLimitMax,
			"ResourceDiskLimit":   metrics.ResourceDiskLimit,
			"MaxParallelism":      metrics.MaxParallelism,
			"MinParallelism":      metrics.MinParallelism,
			"NumParallelism":      metrics.NumParallelism,
			"PipelineWithAlerts":  metrics.PipelineWithAlerts,
		},
	})
}
// identifyUser registers userID with Segment.
//
// Segment needs us to identify a user before we report any events for that
// user. We have no way of knowing whether a user has previously been
// identified, so this is called before every Track() containing user data.
func identifyUser(client *analytics.Client, userID string) {
	msg := &analytics.Identify{UserId: userID}
	client.Identify(msg) //nolint:errcheck // ignoring error because metrics code is non-critical
}
// reportUserMetricsToSegment sends one "<prefix>.usage" event for userID to
// Segment, with the given action/value pair and the cluster ID attached as
// properties. The user is (re-)identified first; see identifyUser.
func reportUserMetricsToSegment(client *analytics.Client, userID string, prefix string, action string, value interface{}, clusterID string) {
	identifyUser(client, userID)
	track := &analytics.Track{
		Event:  fmt.Sprintf("%v.usage", prefix),
		UserId: userID,
		Properties: map[string]interface{}{
			"ClusterID": clusterID,
			action:      value,
		},
	}
	client.Track(track) //nolint:errcheck // ignoring error because metrics code is non-critical
}