/
kind.go
194 lines (174 loc) · 5.66 KB
/
kind.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
package main
import (
"bytes"
"fmt"
"os/exec"
"strings"
"time"
log "github.com/sirupsen/logrus"
kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
"sigs.k8s.io/kind/pkg/cluster"
"sigs.k8s.io/kind/pkg/cmd"
load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image"
)
// provider is the shared kind cluster provider. It is initialised by
// createKind and subsequently used by deleteKind and getAddressesOnNodes;
// calling those before createKind will dereference a nil pointer.
var provider *cluster.Provider
// kubevipManifestValues carries the values substituted into the kube-vip
// static pod manifest (presumably consumed by manifestGen, which is defined
// elsewhere in this package — confirm against that template).
type kubevipManifestValues struct {
	ControlPlaneVIP string // virtual IP for the control plane, by its name — TODO confirm
	ImagePath       string // container image reference for kube-vip — TODO confirm
}
// nodeAddresses pairs a cluster node's name with the list of IP addresses
// reported on that node (see getAddressesOnNodes).
type nodeAddresses struct {
	node      string   // node name (the kind container name)
	addresses []string // IP addresses present on the node
}
// createKind creates (or reuses) a local kind cluster named "services" for
// testing, shaped by the testConfig:
//   - IPv6 / Dualstack switch the cluster networking IP family
//     (Dualstack wins if both are set, since it is applied last).
//   - ControlPlane builds three control-plane nodes, mounting the generated
//     kube-vip static pod manifest (config.ManifestPath) into each; otherwise
//     one control-plane node plus three workers are created.
//
// When the cluster does not already exist, it is created and then prepared:
// config.ImagePath is loaded into the nodes, node hostnames are optionally
// renamed (to force proper node names for service selection), control-plane
// taints are best-effort removed when workloads must run there, and the
// kube-vip cloud-provider ConfigMap, controller, and RBAC are installed.
// If a cluster named "services" already exists, all post-create setup is
// skipped. Errors from external commands include the captured output so
// failures are diagnosable.
func (config *testConfig) createKind() error {
	clusterConfig := kindconfigv1alpha4.Cluster{
		Networking: kindconfigv1alpha4.Networking{
			IPFamily: kindconfigv1alpha4.IPv4Family,
		},
		Nodes: []kindconfigv1alpha4.Node{
			{
				Role: kindconfigv1alpha4.ControlPlaneRole,
			},
		},
	}
	if config.IPv6 {
		// Change networking family to IPv6-only
		clusterConfig.Networking.IPFamily = kindconfigv1alpha4.IPv6Family
	}
	if config.Dualstack {
		// Change networking family to dual-stack (overrides IPv6 above)
		clusterConfig.Networking.IPFamily = kindconfigv1alpha4.DualStackFamily
	}
	if config.ControlPlane {
		if err := config.manifestGen(); err != nil {
			return fmt.Errorf("generating kube-vip manifest: %w", err)
		}
		// Add two additional control plane nodes (3 in total)
		clusterConfig.Nodes = append(clusterConfig.Nodes,
			kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole},
			kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole},
		)
		// Mount the extra kube-vip static pod manifest on every control plane node
		mount := kindconfigv1alpha4.Mount{
			HostPath:      config.ManifestPath,
			ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml",
		}
		for x := range clusterConfig.Nodes {
			if clusterConfig.Nodes[x].Role == kindconfigv1alpha4.ControlPlaneRole {
				clusterConfig.Nodes[x].ExtraMounts = append(clusterConfig.Nodes[x].ExtraMounts, mount)
			}
		}
	} else {
		// Add three additional worker nodes
		clusterConfig.Nodes = append(clusterConfig.Nodes,
			kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole},
			kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole},
			kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole},
		)
	}
	provider = cluster.NewProvider(cluster.ProviderWithLogger(cmd.NewLogger()), cluster.ProviderWithDocker())
	clusters, err := provider.List()
	if err != nil {
		return fmt.Errorf("listing kind clusters: %w", err)
	}
	found := false
	for x := range clusters {
		if clusters[x] == "services" {
			log.Infof("Cluster already exists")
			found = true
		}
	}
	if !found {
		if err := provider.Create("services", cluster.CreateWithV1Alpha4Config(&clusterConfig)); err != nil {
			return fmt.Errorf("creating kind cluster: %w", err)
		}
		loadImageCmd := load.NewCommand(cmd.NewLogger(), cmd.StandardIOStreams())
		loadImageCmd.SetArgs([]string{"--name", "services", config.ImagePath})
		if err := loadImageCmd.Execute(); err != nil {
			return fmt.Errorf("loading image [%s] into cluster: %w", config.ImagePath, err)
		}
		nodes, err := provider.ListNodes("services")
		if err != nil {
			return fmt.Errorf("listing cluster nodes: %w", err)
		}
		if !config.skipHostnameChange {
			log.Infof("⚙️ changing hostnames on nodes to force using proper node names for service selection")
			for _, node := range nodes {
				nodeName := node.String()
				// Note: named hostnameCmd (not cmd) to avoid shadowing the
				// imported kind cmd package.
				hostnameCmd := exec.Command("docker", "exec", nodeName, "hostname", nodeName+"-modified")
				if out, err := hostnameCmd.CombinedOutput(); err != nil {
					return fmt.Errorf("changing hostname on node [%s]: %w, output: %s", nodeName, err, out)
				}
			}
		}
		// HMMM, if we want to run workloads on the control planes (todo)
		if config.ControlPlane {
			for _, node := range nodes {
				nodeName := node.String()
				taintCmd := exec.Command("kubectl", "taint", "nodes", nodeName, "node-role.kubernetes.io/control-plane:NoSchedule-")
				// Best-effort: the taint may already be absent, so the error
				// is deliberately ignored.
				_, _ = taintCmd.CombinedOutput()
			}
		}
		globalRange := "172.18.100.10-172.18.100.30"
		if config.IPv6 {
			globalRange = "fd34:70db:8529:1e3d:0000:0000:0000:0010-fd34:70db:8529:1e3d:0000:0000:0000:0030"
		}
		// Publish the global address range consumed by the kube-vip cloud provider
		configMapCmd := exec.Command("kubectl", "create", "configmap", "--namespace", "kube-system", "kubevip", "--from-literal", "range-global="+globalRange)
		if out, err := configMapCmd.CombinedOutput(); err != nil {
			return fmt.Errorf("creating kubevip configmap: %w, output: %s", err, out)
		}
		controllerCmd := exec.Command("kubectl", "create", "-f", "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml")
		if out, err := controllerCmd.CombinedOutput(); err != nil {
			return fmt.Errorf("deploying kube-vip cloud controller: %w, output: %s", err, out)
		}
		rbacCmd := exec.Command("kubectl", "create", "-f", "https://kube-vip.io/manifests/rbac.yaml")
		if out, err := rbacCmd.CombinedOutput(); err != nil {
			return fmt.Errorf("applying kube-vip RBAC: %w, output: %s", err, out)
		}
		log.Infof("💤 sleeping for a few seconds to let controllers start")
		time.Sleep(time.Second * 5)
	}
	return nil
}
// deleteKind tears down the "services" kind cluster created by createKind.
// It returns an error (instead of panicking on a nil provider) when
// createKind was never successfully called.
func deleteKind() error {
	log.Info("🧽 deleting Kind cluster")
	if provider == nil {
		return fmt.Errorf("kind provider is not initialised; createKind must run first")
	}
	return provider.Delete("services", "")
}
// getAddressesOnNodes returns, for every node in the "services" cluster, the
// node name and the IP addresses configured on it, as reported by running
// `hostname --all-ip-addresses` inside the node container.
func getAddressesOnNodes() ([]nodeAddresses, error) {
	nodesConfig := []nodeAddresses{}
	nodes, err := provider.ListNodes("services")
	if err != nil {
		return nodesConfig, err
	}
	for x := range nodes {
		var b bytes.Buffer
		// Named nodeCmd (not exec) to avoid shadowing the os/exec package.
		// Stdin is intentionally not wired up: the command reads no input,
		// and the previous code pointed stdin at the same buffer that was
		// collecting stdout/stderr.
		nodeCmd := nodes[x].Command("hostname", "--all-ip-addresses")
		nodeCmd.SetStderr(&b)
		nodeCmd.SetStdout(&b)
		if err := nodeCmd.Run(); err != nil {
			return nodesConfig, err
		}
		// strings.Fields drops the trailing space/newline that
		// `hostname --all-ip-addresses` emits; a plain strings.Split on " "
		// left "\n" glued to the last address, breaking exact comparisons
		// in checkNodesForDuplicateAddresses.
		nodesConfig = append(nodesConfig, nodeAddresses{
			node:      nodes[x].String(),
			addresses: strings.Fields(b.String()),
		})
	}
	return nodesConfig, nil
}
// checkNodesForDuplicateAddresses verifies that address appears on at most
// one node. It returns nil when the address is absent or held by a single
// node, and an error listing every holder when more than one node (or more
// than one entry overall) carries it.
func checkNodesForDuplicateAddresses(nodes []nodeAddresses, address string) error {
	var holders []string
	// Record every occurrence of the address across all nodes.
	for _, n := range nodes {
		for _, a := range n.addresses {
			if a == address {
				holders = append(holders, n.node)
			}
		}
	}
	// A single (or zero) occurrence is fine; anything more means the VIP
	// is advertised from multiple places.
	if len(holders) <= 1 {
		return nil
	}
	return fmt.Errorf("‼️ multiple nodes [%s] have address [%s]", strings.Join(holders, " "), address)
}