This repository has been archived by the owner on Nov 5, 2021. It is now read-only.
/
prober.go
299 lines (255 loc) · 8.64 KB
/
prober.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
// Copyright 2017-2019 The Cloudprober Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package prober provides a prober for running a set of probes.
Prober takes in a config proto which dictates what probes should be created
with what configuration, and manages the asynchronous fan-in/fan-out of the
metrics data from these probes.
*/
package prober
import (
"context"
"fmt"
"math/rand"
"regexp"
"sync"
"time"
"github.com/golang/glog"
configpb "github.com/google/cloudprober/config/proto"
"github.com/google/cloudprober/config/runconfig"
"github.com/google/cloudprober/logger"
"github.com/google/cloudprober/metrics"
spb "github.com/google/cloudprober/prober/proto"
"github.com/google/cloudprober/probes"
"github.com/google/cloudprober/probes/options"
probes_configpb "github.com/google/cloudprober/probes/proto"
rdsserver "github.com/google/cloudprober/rds/server"
"github.com/google/cloudprober/servers"
"github.com/google/cloudprober/surfacers"
"github.com/google/cloudprober/sysvars"
"github.com/google/cloudprober/targets"
"github.com/google/cloudprober/targets/endpoint"
"github.com/google/cloudprober/targets/lameduck"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Prober represents a collection of probes where each probe implements the Probe interface.
// It owns the fan-in of metrics from all probes into a single data channel,
// which is drained by the registered surfacers (see Start).
type Prober struct {
	// Probes currently managed by this prober, keyed by probe name.
	Probes map[string]*probes.ProbeInfo
	// Servers started alongside the probes (initialized in Init).
	Servers []*servers.ServerInfo
	// c is the prober config this instance was initialized with.
	c *configpb.ProberConfig
	// l is the prober's own logger.
	l *logger.Logger
	// mu guards probe bookkeeping (Probes and probeCancelFunc) across
	// addProbe and startProbe.
	mu sync.Mutex
	// ldLister lists lameducked targets; may be nil if lameduck options are
	// not configured or the default lister could not be obtained (see Init).
	ldLister endpoint.Lister
	// Surfacers that receive every EventMetrics message (initialized in Init).
	Surfacers []*surfacers.SurfacerInfo
	// Probe channel to handle starting of the new probes (added via the
	// Cloudprober gRPC service).
	grpcStartProbeCh chan string
	// Per-probe cancelFunc map.
	probeCancelFunc map[string]context.CancelFunc
	// dataChan for passing metrics between probes and main goroutine.
	dataChan chan *metrics.EventMetrics
	// Used by GetConfig for /config handler.
	TextConfig string
}
// runOnThisHost reports whether a probe restricted by the runOn regexp
// pattern should run on the given host. An empty pattern matches every host;
// an invalid pattern yields the regexp compilation error.
func runOnThisHost(runOn string, hostname string) (bool, error) {
	if len(runOn) == 0 {
		return true, nil
	}
	re, compileErr := regexp.Compile(runOn)
	if compileErr != nil {
		return false, compileErr
	}
	return re.MatchString(hostname), nil
}
// addProbe creates a probe from the given probe definition and records it in
// pr.Probes. Probes whose run_on pattern doesn't match this host are silently
// skipped (nil error). Returns a codes.AlreadyExists status if a probe with
// the same name is already defined.
func (pr *Prober) addProbe(p *probes_configpb.ProbeDef) error {
	pr.mu.Lock()
	defer pr.mu.Unlock()

	// Check if this probe is supposed to run here.
	runHere, err := runOnThisHost(p.GetRunOn(), sysvars.Vars()["hostname"])
	if err != nil {
		return err
	}
	if !runHere {
		return nil
	}

	if pr.Probes[p.GetName()] != nil {
		return status.Errorf(codes.AlreadyExists, "probe %s is already defined", p.GetName())
	}

	opts, err := options.BuildProbeOptions(p, pr.ldLister, pr.c.GetGlobalTargetsOptions(), pr.l)
	if err != nil {
		// Use status.Error, not status.Errorf: err.Error() is data, not a
		// format string, and may contain '%' verbs that Errorf would mangle.
		return status.Error(codes.Unknown, err.Error())
	}

	pr.l.Infof("Creating a %s probe: %s", p.GetType(), p.GetName())
	probeInfo, err := probes.CreateProbe(p, opts)
	if err != nil {
		return status.Error(codes.Unknown, err.Error())
	}
	pr.Probes[p.GetName()] = probeInfo

	return nil
}
// Init initializes the prober with the given config. It must be called before
// Start. Initialization order matters here: the RDS server and the lameduck
// lister are set up before probes, because probe targets (built in addProbe)
// may depend on both.
func (pr *Prober) Init(ctx context.Context, cfg *configpb.ProberConfig, l *logger.Logger) error {
	pr.c = cfg
	pr.l = l

	// Initialize cloudprober gRPC service if configured.
	srv := runconfig.DefaultGRPCServer()
	if srv != nil {
		pr.grpcStartProbeCh = make(chan string)
		spb.RegisterCloudproberServer(srv, pr)
	}

	// Initialize RDS server, if configured and attach to the default gRPC server.
	// Note that we can still attach services to the default gRPC server as it's
	// started later in Start().
	if c := pr.c.GetRdsServer(); c != nil {
		// NOTE: l here intentionally shadows the method parameter — the RDS
		// server gets its own named logger.
		l, err := logger.NewCloudproberLog("rds-server")
		if err != nil {
			return err
		}
		rdsServer, err := rdsserver.New(ctx, c, nil, l)
		if err != nil {
			return err
		}
		runconfig.SetLocalRDSServer(rdsServer)
		if srv != nil {
			rdsServer.RegisterWithGRPC(srv)
		}
	}

	// Initialize lameduck lister
	globalTargetsOpts := pr.c.GetGlobalTargetsOptions()

	if globalTargetsOpts.GetLameDuckOptions() != nil {
		ldLogger, err := logger.NewCloudproberLog("lame-duck")
		if err != nil {
			return fmt.Errorf("error in initializing lame-duck logger: %v", err)
		}

		if err := lameduck.InitDefaultLister(globalTargetsOpts, nil, ldLogger); err != nil {
			return err
		}

		// A lister lookup failure is deliberately non-fatal: pr.ldLister
		// stays nil and probes simply won't exclude lameducked targets.
		pr.ldLister, err = lameduck.GetDefaultLister()
		if err != nil {
			pr.l.Warningf("Error while getting default lameduck lister, lameduck behavior will be disabled. Err: %v", err)
		}
	}

	var err error

	// Initialize shared targets
	for _, st := range pr.c.GetSharedTargets() {
		tgts, err := targets.New(st.GetTargets(), pr.ldLister, globalTargetsOpts, pr.l, pr.l)
		if err != nil {
			return err
		}
		targets.SetSharedTargets(st.GetName(), tgts)
	}

	// Initialize probes
	pr.Probes = make(map[string]*probes.ProbeInfo)
	pr.probeCancelFunc = make(map[string]context.CancelFunc)
	for _, p := range pr.c.GetProbe() {
		if err := pr.addProbe(p); err != nil {
			return err
		}
	}

	// Initialize servers
	pr.Servers, err = servers.Init(ctx, pr.c.GetServer())
	if err != nil {
		return err
	}

	// Initialize surfacers
	pr.Surfacers, err = surfacers.Init(ctx, pr.c.GetSurfacer())
	if err != nil {
		return err
	}
	return nil
}
// Start starts a previously initialized Cloudprober: it spins up the metrics
// fan-out goroutine, the sysvars exporter, the configured servers, and all
// probes (with jitter, unless disabled). Probes run on contexts derived from
// ctx, so cancelling ctx stops them.
func (pr *Prober) Start(ctx context.Context) {
	// Buffered channel so slow surfacer processing doesn't immediately
	// back-pressure the probes.
	pr.dataChan = make(chan *metrics.EventMetrics, 100000)

	go func() {
		var em *metrics.EventMetrics
		for {
			em = <-pr.dataChan
			var s = em.String()
			// Oversized entries are dropped (not truncated) so surfacers
			// never see a metric line that exceeds the log-entry limit.
			if len(s) > logger.MaxLogEntrySize {
				glog.Warningf("Metric entry for timestamp %v dropped due to large size: %d", em.Timestamp, len(s))
				continue
			}

			// Replicate the surfacer message to every surfacer we have
			// registered. Note that s.Write() is expected to be
			// non-blocking to avoid blocking of EventMetrics message
			// processing.
			for _, surfacer := range pr.Surfacers {
				surfacer.Write(context.Background(), em)
			}
		}
	}()

	// Start a goroutine to export system variables
	go sysvars.Start(ctx, pr.dataChan, time.Millisecond*time.Duration(pr.c.GetSysvarsIntervalMsec()), pr.c.GetSysvarsEnvVar())

	// Start servers, each in its own goroutine
	for _, s := range pr.Servers {
		go s.Start(ctx, pr.dataChan)
	}

	if pr.c.GetDisableJitter() {
		for name := range pr.Probes {
			go pr.startProbe(ctx, name)
		}
		return
	}
	pr.startProbesWithJitter(ctx)

	if runconfig.DefaultGRPCServer() != nil {
		// Start a goroutine to handle starting of the probes added through gRPC.
		// AddProbe adds new probes to the pr.grpcStartProbeCh channel and this
		// goroutine reads from that channel and starts the probe using the overall
		// Start context.
		go func() {
			// A plain channel-range replaces the previous single-case
			// for/select (staticcheck S1000); behavior is identical since
			// the channel is never closed.
			for name := range pr.grpcStartProbeCh {
				pr.startProbe(ctx, name)
			}
		}()
	}
}
// startProbe starts the named probe in its own goroutine, on a context
// derived from ctx, and records the probe's cancel function in
// pr.probeCancelFunc so the probe can later be stopped individually.
func (pr *Prober) startProbe(ctx context.Context, name string) {
	pr.mu.Lock()
	defer pr.mu.Unlock()

	// Guard the map lookup: dereferencing a missing entry would panic if a
	// caller races a probe removal or passes an unknown name.
	p, ok := pr.Probes[name]
	if !ok {
		pr.l.Errorf("Probe %s not found in the probes database, cannot start it", name)
		return
	}

	probeCtx, cancelFunc := context.WithCancel(ctx)
	pr.probeCancelFunc[name] = cancelFunc
	go p.Start(probeCtx, pr.dataChan)
}
// startProbesWithJitter tries to space out probes over time, as much as
// possible, without making it too complicated. We arrange probes into
// interval buckets - all probes with the same interval will be part of the
// same bucket, and we then spread out probes within that interval by
// introducing a delay of interval / len(probes) between probes. We also
// introduce a random jitter between different interval buckets.
func (pr *Prober) startProbesWithJitter(ctx context.Context) {
	// Seed random number generator.
	rand.Seed(time.Now().UnixNano())

	// Make interval -> [probe1, probe2, probe3..] map
	intervalBuckets := make(map[time.Duration][]*probes.ProbeInfo)
	for _, p := range pr.Probes {
		intervalBuckets[p.Options.Interval] = append(intervalBuckets[p.Options.Interval], p)
	}

	for interval, probeInfos := range intervalBuckets {
		go func(interval time.Duration, probeInfos []*probes.ProbeInfo) {
			// Introduce a random jitter between interval buckets.
			// rand.Int63n panics for n <= 0, which the previous code hit for
			// sub-millisecond probe intervals; skip the jitter in that case.
			if maxJitterMsec := int64(interval.Seconds() * 1000); maxJitterMsec > 0 {
				time.Sleep(time.Duration(rand.Int63n(maxJitterMsec)) * time.Millisecond)
			}

			interProbeDelay := interval / time.Duration(len(probeInfos))

			// Spread out probes evenly within an interval bucket.
			for _, p := range probeInfos {
				pr.l.Info("Starting probe: ", p.Name)
				go pr.startProbe(ctx, p.Name)
				time.Sleep(interProbeDelay)
			}
		}(interval, probeInfos)
	}
}