cluster.go
package arrebato

import (
	"context"
	"sync"

	"github.com/hashicorp/go-multierror"
	"google.golang.org/grpc"

	nodesvc "github.com/davidsbond/arrebato/internal/proto/arrebato/node/service/v1"
)
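
// cluster manages gRPC client connections to each node in the cluster and
// keeps track of which connection belongs to the current leader.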
type cluster struct {
	mux        *sync.RWMutex
	leaderNode *grpc.ClientConn
	nodes      map[string]*grpc.ClientConn
}
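
// newCluster builds a cluster from the given connections, describing each one
// to determine the name of the node it connects to, then attempts to locate
// the current leader. The returned cluster owns the connections and closes
// them via Close.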
func newCluster(ctx context.Context, connections []*grpc.ClientConn) (*cluster, error) {
	nodes := make(map[string]*grpc.ClientConn)
	for _, connection := range connections {
		resp, err := nodesvc.NewNodeServiceClient(connection).Describe(ctx, &nodesvc.DescribeRequest{})
		if err != nil {
			return nil, err
		}

		nodes[resp.GetNode().GetName()] = connection
	}

	cl := &cluster{nodes: nodes, mux: &sync.RWMutex{}}
	cl.findLeader(ctx)
	return cl, nil
}
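
// any returns a connection to an arbitrary node in the cluster, or nil if the
// cluster has no nodes.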
func (c *cluster) any() *grpc.ClientConn {
	c.mux.RLock()
	defer c.mux.RUnlock()

	// Map ordering is non-deterministic, so we can just pick the first one we
	// get in the range.
	for _, conn := range c.nodes {
		return conn
	}

	return nil
}
func (c *cluster) leader() *grpc.ClientConn {
	c.mux.RLock()
	defer c.mux.RUnlock()
	return c.leaderNode
}
func (c *cluster) named(name string) (*grpc.ClientConn, bool) {
	c.mux.RLock()
	defer c.mux.RUnlock()
	conn, ok := c.nodes[name]
	return conn, ok
}
func (c *cluster) topicOwner(ctx context.Context, topic string) (*grpc.ClientConn, error) {
	var selected *grpc.ClientConn
	err := c.forEach(ctx, func(conn *grpc.ClientConn) error {
		if selected != nil {
			return nil
		}

		n, err := nodesvc.NewNodeServiceClient(conn).Describe(ctx, &nodesvc.DescribeRequest{})
		if err != nil {
			return err
		}

		for _, tp := range n.GetNode().GetTopics() {
			if tp == topic {
				selected = conn
				return nil
			}
		}

		return nil
	})

	switch {
	case err != nil:
		return nil, err
	case selected == nil:
		return nil, ErrNoTopic
	default:
		return selected, nil
	}
}
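
// findLeader queries each node and records the connection of the first node
// that reports itself as the leader. Nodes that fail to respond are skipped,
// so the leader connection may remain unset.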
func (c *cluster) findLeader(ctx context.Context) {
	c.mux.Lock()
	defer c.mux.Unlock()

	for _, connection := range c.nodes {
		resp, err := nodesvc.NewNodeServiceClient(connection).Describe(ctx, &nodesvc.DescribeRequest{})
		if err != nil {
			continue
		}

		if resp.GetNode().GetLeader() {
			c.leaderNode = connection
			return
		}
	}
}
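
// forEach invokes fn for every node connection, stopping at the first error
// or when the context is cancelled.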
func (c *cluster) forEach(ctx context.Context, fn func(conn *grpc.ClientConn) error) error {
	c.mux.RLock()
	defer c.mux.RUnlock()

	for _, connection := range c.nodes {
		if ctx.Err() != nil {
			return ctx.Err()
		}

		if err := fn(connection); err != nil {
			return err
		}
	}

	return nil
}
func (c *cluster) Close() error {
	errs := make([]error, 0)
	for _, connection := range c.nodes {
		if err := connection.Close(); err != nil {
			errs = append(errs, err)
		}
	}

	return multierror.Append(nil, errs...).ErrorOrNil()
}
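
// clusterUsageSketch is a minimal, hypothetical sketch (not part of the
// original file) of how the cluster type above might be driven by a caller
// that has already dialled gRPC connections to each node. The function name
// and the "example-topic" topic name are illustrative placeholders.
func clusterUsageSketch(ctx context.Context, conns []*grpc.ClientConn) error {
	// Build the cluster; each connection is described to learn its node name
	// and the current leader is located up front.
	cl, err := newCluster(ctx, conns)
	if err != nil {
		return err
	}
	defer cl.Close()

	// Leader-only requests would use the leader connection; it may be nil if
	// no node reported itself as leader when the cluster was built.
	_ = cl.leader()

	// Topic-scoped requests are routed to whichever node owns the topic.
	// ErrNoTopic is returned when no node reports ownership.
	if _, err := cl.topicOwner(ctx, "example-topic"); err != nil {
		return err
	}

	return nil
}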