// Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package loadshedding
import (
"context"
"fmt"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"istio.io/istio/pkg/log"
)
const (
	// Disabled removes all throttling behavior for the server.
	Disabled ThrottlerMode = iota
	// LogOnly enables an advisory mode for throttling behavior on the server.
	LogOnly
	// Enforce turns the throttling behavior on for the server.
	Enforce
)
type (
	// ThrottlerMode controls the behavior of a throttler.
	ThrottlerMode int

	// RequestInfo is used to hold information related to a request that
	// could be relevant to a LoadEvaluator.
	RequestInfo struct {
		// PredictedCost enables the server to pass information about the relative
		// size (or impact) of the request into the throttler. For instance, it can
		// be used to distinguish between Check() and Report() calls by setting the
		// value to the size of the batch.
		PredictedCost float64
	}

	// Throttler provides the loadshedding behavior by evaluating current request information
	// against a set of configured LoadEvaluators.
	Throttler struct {
		// mode selects between Disabled, LogOnly, and Enforce behavior.
		mode ThrottlerMode
		// evaluators maps evaluator name -> evaluator; populated from Options
		// in NewThrottler.
		evaluators map[string]LoadEvaluator
		// thresholds maps evaluator name -> the threshold that evaluator is
		// checked against in Throttle.
		thresholds map[string]float64
	}
)
var (
	// scope is the logging scope for all loadshedding-related messages.
	scope = log.RegisterScope("loadshedding", "Information related to loadshedding", 0)

	// modesToString and stringToModes provide the (de)serialization of
	// ThrottlerMode values; they must stay mutual inverses.
	modesToString = map[ThrottlerMode]string{
		Disabled: "disabled",
		LogOnly:  "logonly",
		Enforce:  "enforce",
	}
	stringToModes = map[string]ThrottlerMode{
		"disabled": Disabled,
		"logonly":  LogOnly,
		"enforce":  Enforce,
	}

	// throttled counts requests dropped by the loadshedder (incremented only
	// in Enforce mode; LogOnly drops nothing and records nothing).
	throttled = stats.Int64(
		"loadshedding/requests_throttled",
		"The number of requests that have been dropped by the loadshedder.",
		stats.UnitDimensionless)
	// throttledView exports the throttled count, registered in init().
	throttledView = &view.View{
		Name:        "mixer/" + throttled.Name(),
		Measure:     throttled,
		Aggregation: view.Count(),
	}
)
// init registers the throttling metrics view. Registration can only fail due
// to a programming error (e.g. a duplicate view name), so failure is fatal.
func init() {
	err := view.Register(throttledView)
	if err != nil {
		panic(err)
	}
}
// NewThrottler builds a Throttler based on the configured options.
func NewThrottler(opts Options) *Throttler {
	throttler := &Throttler{
		mode:       opts.Mode,
		evaluators: make(map[string]LoadEvaluator),
		thresholds: make(map[string]float64),
	}

	// When throttling is disabled there is no point constructing any
	// evaluators; return the empty throttler immediately.
	if throttler.mode == Disabled {
		return throttler
	}

	if opts.AverageLatencyThreshold > 0 {
		latencyEval := NewGRPCLatencyEvaluator(opts.SamplesPerSecond, opts.SampleHalfLife)
		throttler.evaluators[latencyEval.Name()] = latencyEval
		throttler.thresholds[latencyEval.Name()] = opts.AverageLatencyThreshold.Seconds()
	}

	if opts.MaxRequestsPerSecond > 0 {
		rateEval := NewRateLimitEvaluator(opts.MaxRequestsPerSecond, opts.BurstSize)
		throttler.evaluators[rateEval.Name()] = rateEval
		throttler.thresholds[rateEval.Name()] = float64(opts.MaxRequestsPerSecond)
	}

	scope.Debugf("Built Throttler(%#v) from opts(%#v)", throttler, opts)
	return throttler
}
// Evaluator returns a configured LoadEvaluator based on the supplied name. If no
// LoadEvaluator with the given name is known to the Throttler, a nil value will
// be returned.
func (t *Throttler) Evaluator(name string) LoadEvaluator {
	evaluator, ok := t.evaluators[name]
	if !ok {
		// Missing lookups yield the interface zero value, i.e. nil.
		return nil
	}
	return evaluator
}
// Throttle returns a verdict on whether or not the server should drop the request, based on
// the current set of configured LoadEvaluators.
func (t *Throttler) Throttle(ri RequestInfo) bool {
	if t.mode == Disabled {
		return false
	}

	// Both maps are keyed by evaluator name (see NewThrottler), so the
	// iteration key doubles as the threshold-lookup key.
	for name, evaluator := range t.evaluators {
		threshold, ok := t.thresholds[name]
		if !ok {
			// An evaluator without a configured threshold cannot be applied.
			continue
		}

		scope.Debugf("Evaluating load with %s against threshold %f", name, threshold)
		result := evaluator.EvaluateAgainst(ri, threshold)
		if !ThresholdExceeded(result) {
			continue
		}

		msg := fmt.Sprintf("Throttled (%s): '%s'", name, result.Message)
		if t.mode == LogOnly {
			// Advisory mode: surface the verdict but never drop the request.
			scope.Infoa("LogOnly - ", msg)
			continue
		}

		stats.Record(context.Background(), throttled.M(1))
		scope.Warn(msg)
		return true
	}

	return false
}