node.go
/*
Copyright 2021-2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	// RoleWorker is the name of the worker node role
	RoleWorker = "worker"
)

const (
	// LabelRole is the prefix of the node role label; the role name is appended as its suffix
	LabelRole = "node-role.kubernetes.io"
	// LabelHostname is the key of the node hostname label
	LabelHostname = "kubernetes.io/hostname"
)

// GetWorkerNodes returns all nodes labeled as workers
func GetWorkerNodes(ctx context.Context, f *framework.Framework) ([]corev1.Node, error) {
	return GetNodesByRole(ctx, f, RoleWorker)
}

// GetNodesByRole returns all nodes with the specified role
func GetNodesByRole(ctx context.Context, f *framework.Framework, role string) ([]corev1.Node, error) {
	selector, err := labels.Parse(fmt.Sprintf("%s/%s=", LabelRole, role))
	if err != nil {
		return nil, err
	}
	return GetNodesBySelector(ctx, f, selector)
}
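
// For example, role "worker" makes GetNodesByRole parse the selector
// "node-role.kubernetes.io/worker=", which matches nodes that carry the
// role label with an empty value (the usual shape of node role labels).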

// GetNodesBySelector returns all nodes matching the specified label selector
func GetNodesBySelector(ctx context.Context, f *framework.Framework, selector labels.Selector) ([]corev1.Node, error) {
	nodes, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return nil, err
	}
	return nodes.Items, nil
}

// FilterNodesWithEnoughCores returns the nodes whose allocatable CPU is at least the given amount
func FilterNodesWithEnoughCores(nodes []corev1.Node, cpuAmount string) ([]corev1.Node, error) {
	requestCpu, err := resource.ParseQuantity(cpuAmount)
	if err != nil {
		return nil, fmt.Errorf("invalid CPU amount %q: %w", cpuAmount, err)
	}
	framework.Logf("checking request %v on %d nodes", requestCpu, len(nodes))
	resNodes := []corev1.Node{}
	for _, node := range nodes {
		availCpu, ok := node.Status.Allocatable[corev1.ResourceCPU]
		if !ok || availCpu.IsZero() {
			return nil, fmt.Errorf("node %q has no allocatable CPU", node.Name)
		}
		// Cmp returns -1 when availCpu < requestCpu; skip nodes below the request.
		if availCpu.Cmp(requestCpu) < 0 {
			framework.Logf("node %q does not have enough CPU: available %v, requested %v", node.Name, availCpu, requestCpu)
			continue
		}
		framework.Logf("node %q has enough allocatable CPU", node.Name)
		resNodes = append(resNodes, node)
	}
	return resNodes, nil
}
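
// A minimal usage sketch (illustrative only; assumes a ctx and a framework
// instance f provided by the surrounding e2e test, with "4" as an arbitrary
// CPU requirement):
//
//	workers, err := GetWorkerNodes(ctx, f)
//	if err != nil {
//		framework.Failf("listing worker nodes: %v", err)
//	}
//	candidates, err := FilterNodesWithEnoughCores(workers, "4")
//	if err != nil {
//		framework.Failf("filtering worker nodes by CPU: %v", err)
//	}
//	if len(candidates) == 0 {
//		framework.Failf("no worker node has at least 4 CPUs allocatable")
//	}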