// cluster.go — management of a dockerized Kafka test cluster via
// docker-compose. Forked from optiopay/kafka.
package integration
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strings"
"sync"
"testing"
)
// KafkaCluster drives a docker-compose based Kafka cluster used by
// integration tests.
type KafkaCluster struct {
	// cluster size == number of kafka nodes
	size int
	// kafkaDockerDir is the working directory for every docker and
	// docker-compose invocation (see cmd).
	kafkaDockerDir string
	// verbose mirrors testing.Verbose(); when set, child process
	// output is forwarded to the test's stdout/stderr.
	verbose bool
	// mu serializes Start (which mutates containers).
	mu sync.Mutex
	// containers holds the inspection results cached by Start.
	containers []*Container
}
// Container describes a single docker container of the cluster as
// decoded from `docker inspect` JSON output. Only the fields this
// package reads are declared.
type Container struct {
	// cluster is a back-reference set by KafkaCluster.Containers.
	cluster *KafkaCluster
	ID      string `json:"Id"`
	Image   string
	Args    []string
	Config  struct {
		Cmd          []string
		Env          []string
		ExposedPorts map[string]interface{}
	}
	NetworkSettings struct {
		Gateway   string
		IPAddress string
		// Ports maps container port specs (e.g. "9092/tcp") to their
		// host bindings.
		Ports map[string][]PortMapping
	}
}
// PortMapping is a single host-side binding of a container port, as
// reported by `docker inspect`.
type PortMapping struct {
	HostIP   string `json:"HostIp"`
	HostPort string
}
// NewKafkaCluster returns a cluster descriptor for the docker-compose
// setup located in kafkaDockerDir, configured for size kafka nodes.
// Clusters smaller than four nodes trigger a warning because not every
// test topic can be fully replicated on them.
func NewKafkaCluster(kafkaDockerDir string, size int) *KafkaCluster {
	const minRecommendedSize = 4
	if size < minRecommendedSize {
		fmt.Fprintln(os.Stderr,
			"WARNING: creating cluster smaller than 4 nodes is not sufficient for all topics")
	}
	cluster := &KafkaCluster{
		size:           size,
		kafkaDockerDir: kafkaDockerDir,
		verbose:        testing.Verbose(),
	}
	return cluster
}
// RunningKafka returns true if container is running kafka node,
// recognized by the kafka start script appearing as the second
// command line argument.
func (c *Container) RunningKafka() bool {
	// Guard the index access: containers started with fewer than two
	// arguments (e.g. non-kafka services) must not cause a panic.
	if len(c.Args) < 2 {
		return false
	}
	return c.Args[1] == "start-kafka.sh"
}
// Start starts current container by delegating to the owning
// cluster's ContainerStart (`docker start <ID>`).
func (c *Container) Start() error {
	return c.cluster.ContainerStart(c.ID)
}
// Stop stops current container by delegating to the owning cluster's
// ContainerStop (`docker stop <ID>`).
func (c *Container) Stop() error {
	return c.cluster.ContainerStop(c.ID)
}
// Kill forcibly terminates current container by delegating to the
// owning cluster's ContainerKill (`docker kill <ID>`).
func (c *Container) Kill() error {
	return c.cluster.ContainerKill(c.ID)
}
// Start brings up the zookeeper and kafka services with docker-compose
// and scales the kafka service to the configured cluster size. Any
// previously running instance of the cluster is stopped and its dead
// containers removed first, and the resulting container inspection
// data is cached on the cluster.
func (cluster *KafkaCluster) Start() error {
	cluster.mu.Lock()
	defer cluster.mu.Unlock()
	// A clean start requires that nothing is left over from earlier runs.
	if err := cluster.Stop(); err != nil {
		return fmt.Errorf("cannot ensure stop cluster: %s", err)
	}
	if err := cluster.removeStoppedContainers(); err != nil {
		return fmt.Errorf("cannot cleanup dead containers: %s", err)
	}
	if err := cluster.cmd("docker-compose", "up", "-d").Run(); err != nil {
		return fmt.Errorf("docker-compose error: %s", err)
	}
	scaleArg := fmt.Sprintf("kafka=%d", cluster.size)
	if err := cluster.cmd("docker-compose", "scale", scaleArg).Run(); err != nil {
		// Best-effort teardown; the scale error is what matters.
		_ = cluster.Stop()
		return fmt.Errorf("cannot scale kafka: %s", err)
	}
	containers, err := cluster.Containers()
	if err != nil {
		_ = cluster.Stop()
		return fmt.Errorf("cannot get containers info: %s", err)
	}
	cluster.containers = containers
	return nil
}
// Containers inspect all containers running within cluster and return
// information about them. It lists container IDs with
// `docker-compose ps -q`, then decodes the JSON produced by a single
// `docker inspect` call over all of them. Returns an empty result when
// no containers are running.
func (cluster *KafkaCluster) Containers() ([]*Container, error) {
	psCmd := cluster.cmd("docker-compose", "ps", "-q")
	var buf bytes.Buffer
	psCmd.Stdout = &buf
	if err := psCmd.Run(); err != nil {
		return nil, fmt.Errorf("cannot list processes: %s", err)
	}
	rd := bufio.NewReader(&buf)
	inspectArgs := []string{"inspect"}
	for {
		line, err := rd.ReadString('\n')
		// Consume the line before acting on the error: when the final
		// line is not newline-terminated, ReadString hands it back
		// together with io.EOF and it must not be dropped.
		if id := strings.TrimSpace(line); id != "" {
			inspectArgs = append(inspectArgs, id)
		}
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("cannot read \"ps\" output: %s", err)
		}
	}
	if len(inspectArgs) == 1 {
		// No containers running; `docker inspect` with no IDs would fail.
		return nil, nil
	}
	inspectCmd := cluster.cmd("docker", inspectArgs...)
	buf.Reset()
	inspectCmd.Stdout = &buf
	if err := inspectCmd.Run(); err != nil {
		return nil, fmt.Errorf("inspect failed: %s", err)
	}
	var containers []*Container
	if err := json.NewDecoder(&buf).Decode(&containers); err != nil {
		return nil, fmt.Errorf("cannot decode inspection: %s", err)
	}
	// Wire each container back to its owning cluster so the
	// Start/Stop/Kill helpers on Container work.
	for _, c := range containers {
		c.cluster = cluster
	}
	return containers, nil
}
// Stop terminates all services of the cluster by running
// `docker-compose stop -t 0` (immediate shutdown, no grace period) and
// then removes the stopped containers on a best-effort basis.
func (cluster *KafkaCluster) Stop() error {
	if err := cluster.cmd("docker-compose", "stop", "-t", "0").Run(); err != nil {
		return fmt.Errorf("docker-compose stop failed: %s", err)
	}
	// Cleanup failures are deliberately ignored; stopping succeeded.
	_ = cluster.removeStoppedContainers()
	return nil
}
// KafkaAddrs return list of kafka node addresses as strings, in form
// <host>:<port>. Containers without a host binding for port 9092/tcp
// are skipped.
func (cluster *KafkaCluster) KafkaAddrs() ([]string, error) {
	containers, err := cluster.Containers()
	if err != nil {
		return nil, fmt.Errorf("cannot get containers info: %s", err)
	}
	addrs := make([]string, 0, len(containers))
	for _, c := range containers {
		mappings := c.NetworkSettings.Ports["9092/tcp"]
		if len(mappings) == 0 {
			continue
		}
		first := mappings[0]
		addrs = append(addrs, fmt.Sprintf("%s:%s", first.HostIP, first.HostPort))
	}
	return addrs, nil
}
// ContainerStop stops the container with the given ID via `docker stop`.
func (cluster *KafkaCluster) ContainerStop(containerID string) error {
	if err := cluster.cmd("docker", "stop", containerID).Run(); err != nil {
		return fmt.Errorf("cannot stop %q container: %s", containerID, err)
	}
	return nil
}
// ContainerKill forcibly terminates the container with the given ID
// via `docker kill`.
func (cluster *KafkaCluster) ContainerKill(containerID string) error {
	if err := cluster.cmd("docker", "kill", containerID).Run(); err != nil {
		return fmt.Errorf("cannot kill %q container: %s", containerID, err)
	}
	return nil
}
// ContainerStart starts the container with the given ID via
// `docker start`.
func (cluster *KafkaCluster) ContainerStart(containerID string) error {
	if err := cluster.cmd("docker", "start", containerID).Run(); err != nil {
		return fmt.Errorf("cannot start %q container: %s", containerID, err)
	}
	return nil
}
// cmd builds a command configured to run inside the kafka-docker
// directory; in verbose mode the child's output is passed through to
// the test process.
func (cluster *KafkaCluster) cmd(name string, args ...string) *exec.Cmd {
	command := exec.Command(name, args...)
	command.Dir = cluster.kafkaDockerDir
	if cluster.verbose {
		command.Stdout = os.Stdout
		command.Stderr = os.Stderr
	}
	return command
}
// removeStoppedContainers deletes all stopped service containers of
// the compose project via `docker-compose rm -f` (no confirmation
// prompt).
func (cluster *KafkaCluster) removeStoppedContainers() error {
	if err := cluster.cmd("docker-compose", "rm", "-f").Run(); err != nil {
		return fmt.Errorf("docker-compose rm error: %s", err)
	}
	return nil
}