-
Notifications
You must be signed in to change notification settings - Fork 2.1k
/
loadbalancer.go
214 lines (181 loc) · 5.77 KB
/
loadbalancer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package loadbalancingexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/loadbalancingexporter"
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"go.opentelemetry.io/collector/component"
"go.opentelemetry.io/collector/exporter"
"go.uber.org/zap"
)
const (
defaultPort = "4317"
)
var (
errNoResolver = errors.New("no resolvers specified for the exporter")
errMultipleResolversProvided = errors.New("only one resolver should be specified")
)
// componentFactory builds the per-endpoint sub-exporter (the concrete
// signal-specific exporter this load balancer fans out to).
type componentFactory func(ctx context.Context, endpoint string) (component.Component, error)

// loadBalancer keeps a consistent-hash ring over the resolved backend
// endpoints and one wrapped sub-exporter per endpoint, reconciling both
// whenever the resolver reports a change.
type loadBalancer struct {
	logger *zap.Logger
	host   component.Host

	res  resolver  // endpoint resolver (static, DNS, k8s service, or AWS Cloud Map)
	ring *hashRing // consistent-hash ring over the resolved endpoints; nil until the first resolution

	componentFactory componentFactory
	exporters        map[string]*wrappedExporter // keyed by endpoint normalized via endpointWithPort

	stopped    bool
	updateLock sync.RWMutex // guards ring and exporters
}
// Create new load balancer
// newLoadBalancer builds a load balancer from the exporter configuration.
// Exactly one resolver section (static, dns, k8s, aws_cloud_map) must be
// present; zero yields errNoResolver, more than one yields
// errMultipleResolversProvided.
func newLoadBalancer(params exporter.CreateSettings, cfg component.Config, factory componentFactory) (*loadBalancer, error) {
	oCfg := cfg.(*Config)

	// Count how many resolver sections were configured; only one is allowed.
	numResolvers := 0
	for _, configured := range []bool{
		oCfg.Resolver.DNS != nil,
		oCfg.Resolver.Static != nil,
		oCfg.Resolver.AWSCloudMap != nil,
		oCfg.Resolver.K8sSvc != nil,
	} {
		if configured {
			numResolvers++
		}
	}
	if numResolvers > 1 {
		return nil, errMultipleResolversProvided
	}

	// At most one section is set at this point, so exactly one case can fire.
	var (
		res resolver
		err error
	)
	switch {
	case oCfg.Resolver.Static != nil:
		res, err = newStaticResolver(oCfg.Resolver.Static.Hostnames)
	case oCfg.Resolver.DNS != nil:
		dnsLogger := params.Logger.With(zap.String("resolver", "dns"))
		res, err = newDNSResolver(dnsLogger, oCfg.Resolver.DNS.Hostname, oCfg.Resolver.DNS.Port, oCfg.Resolver.DNS.Interval, oCfg.Resolver.DNS.Timeout)
	case oCfg.Resolver.K8sSvc != nil:
		k8sLogger := params.Logger.With(zap.String("resolver", "k8s service"))
		clt, cltErr := newInClusterClient()
		if cltErr != nil {
			return nil, cltErr
		}
		res, err = newK8sResolver(clt, k8sLogger, oCfg.Resolver.K8sSvc.Service, oCfg.Resolver.K8sSvc.Ports, oCfg.Resolver.K8sSvc.Timeout)
	case oCfg.Resolver.AWSCloudMap != nil:
		awsCloudMapLogger := params.Logger.With(zap.String("resolver", "aws_cloud_map"))
		res, err = newCloudMapResolver(awsCloudMapLogger, &oCfg.Resolver.AWSCloudMap.NamespaceName, &oCfg.Resolver.AWSCloudMap.ServiceName, oCfg.Resolver.AWSCloudMap.Port, &oCfg.Resolver.AWSCloudMap.HealthStatus, oCfg.Resolver.AWSCloudMap.Interval, oCfg.Resolver.AWSCloudMap.Timeout)
	}
	if err != nil {
		return nil, err
	}
	if res == nil {
		return nil, errNoResolver
	}

	return &loadBalancer{
		logger:           params.Logger,
		res:              res,
		componentFactory: factory,
		exporters:        map[string]*wrappedExporter{},
	}, nil
}
// Start remembers the host, wires the resolver's change callback, and
// launches the resolver.
func (lb *loadBalancer) Start(ctx context.Context, host component.Host) error {
	lb.host = host
	// The callback must be registered before the resolver starts producing
	// endpoint updates, or the first resolution could be missed.
	lb.res.onChange(lb.onBackendChanges)
	return lb.res.start(ctx)
}
// onBackendChanges is invoked by the resolver whenever the set of backend
// endpoints changes. It rebuilds the hash ring and reconciles the set of
// sub-exporters with the newly resolved endpoints.
//
// Fix: the original compared newRing against lb.ring BEFORE taking
// updateLock, reading lb.ring unsynchronized — a race if two resolver
// callbacks (or a callback and a concurrent update) overlap. The write lock
// is now acquired before the comparison.
func (lb *loadBalancer) onBackendChanges(resolved []string) {
	newRing := newHashRing(resolved)

	lb.updateLock.Lock()
	defer lb.updateLock.Unlock()

	if newRing.equal(lb.ring) {
		return // nothing changed
	}
	lb.ring = newRing

	// TODO: set a timeout?
	ctx := context.Background()

	// Add the missing exporters first, then drop the stale ones, so ring
	// lookups never point at an endpoint without an exporter.
	lb.addMissingExporters(ctx, resolved)
	lb.removeExtraExporters(ctx, resolved)
}
// addMissingExporters creates, wraps, and starts a sub-exporter for every
// endpoint that does not have one yet. Endpoints are normalized with the
// default port first. Failures are logged and skipped so one bad backend
// does not block the rest. The caller is expected to hold updateLock (see
// onBackendChanges).
func (lb *loadBalancer) addMissingExporters(ctx context.Context, endpoints []string) {
	for _, candidate := range endpoints {
		endpoint := endpointWithPort(candidate)
		if _, ok := lb.exporters[endpoint]; ok {
			continue // already have a running exporter for this backend
		}

		exp, err := lb.componentFactory(ctx, endpoint)
		if err != nil {
			lb.logger.Error("failed to create new exporter for endpoint", zap.String("endpoint", endpoint), zap.Error(err))
			continue
		}

		wrapped := newWrappedExporter(exp)
		if err = wrapped.Start(ctx, lb.host); err != nil {
			lb.logger.Error("failed to start new exporter for endpoint", zap.String("endpoint", endpoint), zap.Error(err))
			continue
		}
		lb.exporters[endpoint] = wrapped
	}
}
func endpointWithPort(endpoint string) string {
if !strings.Contains(endpoint, ":") {
endpoint = fmt.Sprintf("%s:%s", endpoint, defaultPort)
}
return endpoint
}
// removeExtraExporters deletes exporters whose endpoint is no longer in the
// resolved set. Each removed exporter is shut down on its own goroutine so a
// slow backend does not stall the resolver callback.
func (lb *loadBalancer) removeExtraExporters(ctx context.Context, endpoints []string) {
	// Normalize the resolved endpoints the same way the exporter map keys are.
	wanted := make([]string, 0, len(endpoints))
	for _, e := range endpoints {
		wanted = append(wanted, endpointWithPort(e))
	}

	for endpoint, exp := range lb.exporters {
		if endpointFound(endpoint, wanted) {
			continue
		}
		// Capture a per-iteration copy for the goroutine (safe pre-Go 1.22).
		doomed := exp
		go func() {
			_ = doomed.Shutdown(ctx)
		}()
		delete(lb.exporters, endpoint) // deleting while ranging a map is safe in Go
	}
}
// endpointFound reports whether endpoint is present in endpoints.
func endpointFound(endpoint string, endpoints []string) bool {
	for i := 0; i < len(endpoints); i++ {
		if endpoints[i] == endpoint {
			return true
		}
	}
	return false
}
// Shutdown stops the resolver, marks the load balancer stopped, and shuts
// down every remaining sub-exporter, returning all errors joined.
//
// Fix: the original only shut down the resolver; the per-endpoint wrapped
// exporters in lb.exporters were never shut down, leaking their resources on
// collector shutdown.
func (lb *loadBalancer) Shutdown(ctx context.Context) error {
	err := lb.res.shutdown(ctx)

	lb.updateLock.Lock()
	defer lb.updateLock.Unlock()
	lb.stopped = true

	for endpoint, exp := range lb.exporters {
		err = errors.Join(err, exp.Shutdown(ctx))
		delete(lb.exporters, endpoint)
	}
	return err
}
// exporterAndEndpoint returns the exporter and the endpoint for the given
// identifier, using the consistent-hash ring to pick the backend.
// NOTE: make rolling updates of next tier of collectors work. currently, this may cause
// data loss because the latest batches sent to outdated backend will never find their way out.
// for details: https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/1690
func (lb *loadBalancer) exporterAndEndpoint(identifier []byte) (*wrappedExporter, string, error) {
	lb.updateLock.RLock()
	defer lb.updateLock.RUnlock()

	// Fix: lb.ring is only assigned in onBackendChanges, so a request racing
	// ahead of the resolver's first resolution would dereference a nil ring.
	// Return an error the caller can retry on instead.
	if lb.ring == nil {
		return nil, "", errors.New("no endpoints available yet, try again later")
	}

	endpoint := lb.ring.endpointFor(identifier)
	exp, found := lb.exporters[endpointWithPort(endpoint)]
	if !found {
		// something is really wrong... how come we couldn't find the exporter??
		return nil, "", fmt.Errorf("couldn't find the exporter for the endpoint %q", endpoint)
	}
	return exp, endpoint, nil
}