package analysis

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	kapi "k8s.io/kubernetes/pkg/apis/core"

	graphapi "github.com/gonum/graph"
	"github.com/gonum/graph/path"

	appsgraph "github.com/openshift/origin/pkg/oc/graph/appsgraph"
	appsnodes "github.com/openshift/origin/pkg/oc/graph/appsgraph/nodes"
	osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph"
	"github.com/openshift/origin/pkg/oc/graph/kubegraph"
	kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph"
	kubenodes "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes"
)

const (
	// HPAMissingScaleRefError denotes an error where a Horizontal Pod Autoscaler does not have a reference to an object to scale
	HPAMissingScaleRefError = "HPAMissingScaleRef"
	// HPAMissingCPUTargetError denotes an error where a Horizontal Pod Autoscaler does not have a CPU target to scale by.
	// Currently, the only supported scale metric is CPU utilization, so without this metric an HPA is useless.
	HPAMissingCPUTargetError = "HPAMissingCPUTarget"
	// HPAOverlappingScaleRefWarning denotes a warning where a Horizontal Pod Autoscaler scales an object that is scaled by some other object as well
	HPAOverlappingScaleRefWarning = "HPAOverlappingScaleRef"
)

// FindHPASpecsMissingCPUTargets scans the graph in search of HorizontalPodAutoscalers that are missing a CPU utilization target.
// As of right now, the only metric that HPAs can use to scale pods is CPU utilization, so if an HPA is missing this target it
// is effectively useless.
func FindHPASpecsMissingCPUTargets(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)

		cpuFound := false
		for _, metric := range node.HorizontalPodAutoscaler.Spec.Metrics {
			if metric.Type == autoscaling.ResourceMetricSourceType && metric.Resource != nil && metric.Resource.Name == kapi.ResourceCPU {
				cpuFound = true
				break
			}
		}

		if !cpuFound {
			markers = append(markers, osgraph.Marker{
				Node:       node,
				Severity:   osgraph.ErrorSeverity,
				Key:        HPAMissingCPUTargetError,
				Message:    fmt.Sprintf("%s is missing a CPU utilization target", namer.ResourceName(node)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf(`oc patch %s -p '{"spec":{"targetCPUUtilizationPercentage": 80}}'`, namer.ResourceName(node))),
			})
		}
	}

	return markers
}
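
// For reference, a metric entry that satisfies the check above looks roughly like the following in an HPA spec
// (illustrative only; only the type and resource name are what the loop inspects):
//
//	metrics:
//	- type: Resource
//	  resource:
//	    name: cpu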

// FindHPASpecsMissingScaleRefs finds all Horizontal Pod Autoscalers whose scale reference points to an object that doesn't exist
// or that the client does not have the permission to see.
func FindHPASpecsMissingScaleRefs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastNode := range graph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind) {
		node := uncastNode.(*kubenodes.HorizontalPodAutoscalerNode)
		scaledObjects := graph.SuccessorNodesByEdgeKind(
			uncastNode,
			kubegraph.ScalingEdgeKind,
		)

		if len(scaledObjects) < 1 {
			markers = append(markers, createMissingScaleRefMarker(node, nil, namer))
			continue
		}

		for _, scaleRef := range scaledObjects {
			if existenceChecker, ok := scaleRef.(osgraph.ExistenceChecker); ok && !existenceChecker.Found() {
				// if this node is synthetic, we can't be sure that the HPA is scaling something that actually exists
				markers = append(markers, createMissingScaleRefMarker(node, scaleRef, namer))
			}
		}
	}

	return markers
}
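
// createMissingScaleRefMarker builds the error marker emitted by FindHPASpecsMissingScaleRefs. scaleRef may be nil
// when no scale target was found at all; otherwise it is the synthetic node standing in for the missing target.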
func createMissingScaleRefMarker(hpaNode *kubenodes.HorizontalPodAutoscalerNode, scaleRef graphapi.Node, namer osgraph.Namer) osgraph.Marker {
	return osgraph.Marker{
		Node:         hpaNode,
		Severity:     osgraph.ErrorSeverity,
		RelatedNodes: []graphapi.Node{scaleRef},
		Key:          HPAMissingScaleRefError,
		Message: fmt.Sprintf("%s is attempting to scale %s/%s, which doesn't exist",
			namer.ResourceName(hpaNode),
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Kind,
			hpaNode.HorizontalPodAutoscaler.Spec.ScaleTargetRef.Name,
		),
	}
}

// FindOverlappingHPAs scans the graph in search of HorizontalPodAutoscalers that are attempting to scale the same set of pods.
// This can occur in two ways:
//   - 1. label selectors for two ReplicationControllers/DeploymentConfigs/etc overlap
//   - 2. multiple HorizontalPodAutoscalers are attempting to scale the same ReplicationController/DeploymentConfig/etc
//
// Case 1 is handled by deconflicting the area of influence of ReplicationControllers/DeploymentConfigs/etc, and therefore we
// can assume that it will be handled before this step. Therefore, we are only concerned with finding HPAs that are trying to
// scale the same resources.
//
// The algorithm that is used to implement this check is described as follows:
//   - create a sub-graph containing only HPA nodes and other nodes that can be scaled, as well as any scaling edges or other
//     edges used to connect between objects that can be scaled
//   - for every resulting edge in the new sub-graph, create an edge in the reverse direction
//   - find the shortest paths between all HPA nodes in the graph
//   - shortest paths connecting two horizontal pod autoscalers are used to create markers for the graph
func FindOverlappingHPAs(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	nodeFilter := osgraph.NodesOfKind(
		kubenodes.HorizontalPodAutoscalerNodeKind,
		kubenodes.ReplicationControllerNodeKind,
		appsnodes.DeploymentConfigNodeKind,
	)
	edgeFilter := osgraph.EdgesOfKind(
		kubegraph.ScalingEdgeKind,
		appsgraph.DeploymentEdgeKind,
		kubeedges.ManagedByControllerEdgeKind,
	)

	hpaSubGraph := graph.Subgraph(nodeFilter, edgeFilter)
	for _, edge := range hpaSubGraph.Edges() {
		osgraph.AddReversedEdge(hpaSubGraph, edge.From(), edge.To(), sets.NewString())
	}

	hpaNodes := hpaSubGraph.NodesByKind(kubenodes.HorizontalPodAutoscalerNodeKind)

	for _, firstHPA := range hpaNodes {
		// we can use Dijkstra's algorithm as we know we do not have any negative edge weights
		shortestPaths := path.DijkstraFrom(firstHPA, hpaSubGraph)

		for _, secondHPA := range hpaNodes {
			if firstHPA == secondHPA {
				continue
			}

			shortestPath, _ := shortestPaths.To(secondHPA)
			if shortestPath == nil {
				// if two HPAs have no path between them, no error exists
				continue
			}

			markers = append(markers, osgraph.Marker{
				Node:         firstHPA,
				Severity:     osgraph.WarningSeverity,
				RelatedNodes: shortestPath[1:],
				Key:          HPAOverlappingScaleRefWarning,
				Message: fmt.Sprintf("%s and %s overlap because they both attempt to scale %s",
					namer.ResourceName(firstHPA), namer.ResourceName(secondHPA), nameList(shortestPath[1:len(shortestPath)-1], namer)),
			})
		}
	}

	return markers
}

// nameList outputs a nicely-formatted list of names:
//   - given nodes ['a', 'b', 'c'], this will return "one of a, b, or c"
//   - given nodes ['a', 'b'], this will return "a or b"
//   - given nodes ['a'], this will return "a"
func nameList(nodes []graphapi.Node, namer osgraph.Namer) string {
	names := []string{}
	for _, node := range nodes {
		names = append(names, namer.ResourceName(node))
	}

	switch len(names) {
	case 0:
		return ""
	case 1:
		return names[0]
	case 2:
		return names[0] + " or " + names[1]
	default:
		return "one of " + strings.Join(names[:len(names)-1], ", ") + ", or " + names[len(names)-1]
	}
}
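
// The three Find* functions above share the (osgraph.Graph, osgraph.Namer) signature, so a caller can collect all
// HPA-related markers in a single pass. A minimal sketch of such a caller follows; allHPAMarkers is hypothetical
// and is not part of the upstream marker registration mechanism.
func allHPAMarkers(graph osgraph.Graph, namer osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}
	// run each HPA analysis over the same graph and concatenate the resulting markers
	markers = append(markers, FindHPASpecsMissingCPUTargets(graph, namer)...)
	markers = append(markers, FindHPASpecsMissingScaleRefs(graph, namer)...)
	markers = append(markers, FindOverlappingHPAs(graph, namer)...)
	return markers
}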