forked from gruntwork-io/terratest
-
Notifications
You must be signed in to change notification settings - Fork 0
/
node.go
124 lines (110 loc) · 4.02 KB
/
node.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
package k8s
import (
"errors"
"testing"
"time"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/retry"
)
// GetNodes queries Kubernetes for information about the worker nodes registered to the cluster. If anything goes wrong,
// the function will automatically fail the test.
// GetNodes queries Kubernetes for information about the worker nodes registered to the cluster. If anything goes wrong,
// the function will automatically fail the test.
func GetNodes(t *testing.T, options *KubectlOptions) []corev1.Node {
	nodeList, lookupErr := GetNodesE(t, options)
	require.NoError(t, lookupErr)
	return nodeList
}
// GetNodesE queries Kubernetes for information about the worker nodes registered to the cluster.
// It returns the node list on success, or an error if the client cannot be built or the API call fails.
func GetNodesE(t *testing.T, options *KubectlOptions) ([]corev1.Node, error) {
	logger.Logf(t, "Getting list of nodes from Kubernetes")

	// Build a typed client from the kubectl options (config path / context).
	client, clientErr := GetKubernetesClientFromOptionsE(t, options)
	if clientErr != nil {
		return nil, clientErr
	}

	nodeList, listErr := client.CoreV1().Nodes().List(metav1.ListOptions{})
	if listErr != nil {
		return nil, listErr
	}
	return nodeList.Items, nil
}
// GetReadyNodes queries Kubernetes for information about the worker nodes registered to the cluster and only returns
// those that are in the ready state. If anything goes wrong, the function will automatically fail the test.
// GetReadyNodes queries Kubernetes for information about the worker nodes registered to the cluster and only returns
// those that are in the ready state. If anything goes wrong, the function will automatically fail the test.
func GetReadyNodes(t *testing.T, options *KubectlOptions) []corev1.Node {
	readyNodes, lookupErr := GetReadyNodesE(t, options)
	require.NoError(t, lookupErr)
	return readyNodes
}
// GetReadyNodesE queries Kubernetes for information about the worker nodes registered to the cluster and only returns
// those that are in the ready state.
// GetReadyNodesE queries Kubernetes for information about the worker nodes registered to the cluster and only returns
// those that are in the ready state.
func GetReadyNodesE(t *testing.T, options *KubectlOptions) ([]corev1.Node, error) {
	allNodes, err := GetNodesE(t, options)
	if err != nil {
		return nil, err
	}
	logger.Logf(t, "Filtering list of nodes from Kubernetes for Ready nodes")
	// Intentionally a non-nil empty slice, matching the original behavior
	// when no node is ready.
	ready := []corev1.Node{}
	for i := range allNodes {
		if IsNodeReady(allNodes[i]) {
			ready = append(ready, allNodes[i])
		}
	}
	return ready, nil
}
// IsNodeReady takes a Kubernetes Node information object and checks if the Node is in the ready state.
// IsNodeReady takes a Kubernetes Node information object and checks if the Node is in the ready state.
// It reports true only when the NodeReady condition is present and its status is True.
func IsNodeReady(node corev1.Node) bool {
	for i := range node.Status.Conditions {
		cond := node.Status.Conditions[i]
		if cond.Type != corev1.NodeReady {
			continue
		}
		// First (and, in practice, only) NodeReady condition decides the answer.
		return cond.Status == corev1.ConditionTrue
	}
	// No NodeReady condition reported: treat the node as not ready.
	return false
}
// WaitUntilAllNodesReady continuously polls the Kubernetes cluster until all nodes in the cluster reach the ready
// state, or runs out of retries. Will fail the test immediately if it times out.
// WaitUntilAllNodesReady continuously polls the Kubernetes cluster until all nodes in the cluster reach the ready
// state, or runs out of retries. Will fail the test immediately if it times out.
func WaitUntilAllNodesReady(t *testing.T, options *KubectlOptions, retries int, sleepBetweenRetries time.Duration) {
	require.NoError(t, WaitUntilAllNodesReadyE(t, options, retries, sleepBetweenRetries))
}
// WaitUntilAllNodesReadyE continuously polls the Kubernetes cluster until all nodes in the cluster reach the ready
// state, or runs out of retries.
// WaitUntilAllNodesReadyE continuously polls the Kubernetes cluster until all nodes in the cluster reach the ready
// state, or runs out of retries. Returns the last error from the readiness check if it never succeeds.
func WaitUntilAllNodesReadyE(t *testing.T, options *KubectlOptions, retries int, sleepBetweenRetries time.Duration) error {
	message, err := retry.DoWithRetryE(
		t,
		"Wait for all Kube Nodes to be ready",
		retries,
		sleepBetweenRetries,
		func() (string, error) {
			if _, err := AreAllNodesReadyE(t, options); err != nil {
				return "", err
			}
			return "All nodes ready", nil
		},
	)
	// Log with an explicit "%s" verb: message is not a constant, so passing it
	// directly as the format string would misinterpret any '%' it contains
	// (and is flagged by go vet's printf check).
	logger.Logf(t, "%s", message)
	return err
}
// AreAllNodesReady checks if all nodes are ready in the Kubernetes cluster targeted by the current config context
// AreAllNodesReady checks if all nodes are ready in the Kubernetes cluster targeted by the current config context.
// The underlying error (if any) is deliberately discarded; use AreAllNodesReadyE for the reason.
func AreAllNodesReady(t *testing.T, options *KubectlOptions) bool {
	ready, _ := AreAllNodesReadyE(t, options)
	return ready
}
// AreAllNodesReadyE checks if all nodes are ready in the Kubernetes cluster targeted by the current config context. If
// false, returns an error indicating the reason.
// AreAllNodesReadyE checks if all nodes are ready in the Kubernetes cluster targeted by the current config context. If
// false, returns an error indicating the reason.
func AreAllNodesReadyE(t *testing.T, options *KubectlOptions) (bool, error) {
	nodes, err := GetNodesE(t, options)
	if err != nil {
		return false, err
	}
	// A cluster that reports zero registered nodes is never considered ready.
	if len(nodes) == 0 {
		// Error strings are lowercase, per Go convention (staticcheck ST1005).
		return false, errors.New("no nodes available")
	}
	for _, node := range nodes {
		if !IsNodeReady(node) {
			return false, errors.New("not all nodes ready")
		}
	}
	return true, nil
}