package analysis

import (
	"fmt"

	"github.com/gonum/graph"

	kapi "k8s.io/kubernetes/pkg/apis/core"
	kdeplutil "k8s.io/kubernetes/pkg/controller/deployment/util"

	osgraph "github.com/openshift/origin/pkg/api/graph"
	kubegraph "github.com/openshift/origin/pkg/api/kubegraph/nodes"
	appsedges "github.com/openshift/origin/pkg/apps/graph"
	appsgraph "github.com/openshift/origin/pkg/apps/graph/nodes"
	buildedges "github.com/openshift/origin/pkg/build/graph"
	buildutil "github.com/openshift/origin/pkg/build/util"
	imageedges "github.com/openshift/origin/pkg/image/graph"
	imagegraph "github.com/openshift/origin/pkg/image/graph/nodes"
)

const (
	MissingImageStreamErr        = "MissingImageStream"
	MissingImageStreamTagWarning = "MissingImageStreamTag"
	MissingReadinessProbeWarning = "MissingReadinessProbe"
	SingleHostVolumeWarning      = "SingleHostVolume"
	MissingPVCWarning            = "MissingPersistentVolumeClaim"
)

// FindDeploymentConfigTriggerErrors checks for possible failures in deployment config
// image change triggers.
//
// Precedence of failures:
// 1. The image stream for the tag of interest does not exist.
// 2. The image stream tag does not exist.
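//
// An illustrative sketch of consuming the returned markers (a populated graph g and
// Namer f are assumed to be supplied by the caller; this is not part of the package API):
//
//	for _, marker := range FindDeploymentConfigTriggerErrors(g, f) {
//		fmt.Printf("%v: %s\n", marker.Severity, marker.Message)
//	}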
func FindDeploymentConfigTriggerErrors(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastDcNode := range g.NodesByKind(appsgraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*appsgraph.DeploymentConfigNode)
		marker := ictMarker(g, f, dcNode)
		if marker != nil {
			markers = append(markers, *marker)
		}
	}

	return markers
}

// ictMarker inspects the image change triggers for the provided deploymentconfig and returns
// a marker in either of the following two cases:
//
// 1. The image stream pointed to by the dc trigger does not exist.
// 2. The image stream tag pointed to by the dc trigger does not exist and there is no build in
// flight that could push to the tag.
func ictMarker(g osgraph.Graph, f osgraph.Namer, dcNode *appsgraph.DeploymentConfigNode) *osgraph.Marker {
	for _, uncastIstNode := range g.PredecessorNodesByEdgeKind(dcNode, appsedges.TriggersDeploymentEdgeKind) {
		if istNode := uncastIstNode.(*imagegraph.ImageStreamTagNode); !istNode.Found() {
			// The image stream for the tag of interest does not exist.
			if isNode, exists := doesImageStreamExist(g, uncastIstNode); !exists {
				return &osgraph.Marker{
					Node:         dcNode,
					RelatedNodes: []graph.Node{uncastIstNode, isNode},
					Severity:     osgraph.ErrorSeverity,
					Key:          MissingImageStreamErr,
					Message: fmt.Sprintf("The image trigger for %s will have no effect because %s does not exist.",
						f.ResourceName(dcNode), f.ResourceName(isNode)),
					// TODO: Suggest `oc create imagestream` once we have that.
				}
			}

			for _, bcNode := range buildedges.BuildConfigsForTag(g, istNode) {
				// Avoid warning for the dc image trigger in case there is a build in flight.
				if latestBuild := buildedges.GetLatestBuild(g, bcNode); latestBuild != nil && !buildutil.IsBuildComplete(latestBuild.Build) {
					return nil
				}
			}

			// The image stream tag of interest does not exist.
			return &osgraph.Marker{
				Node:         dcNode,
				RelatedNodes: []graph.Node{uncastIstNode},
				Severity:     osgraph.WarningSeverity,
				Key:          MissingImageStreamTagWarning,
				Message: fmt.Sprintf("The image trigger for %s will have no effect until %s is imported or created by a build.",
					f.ResourceName(dcNode), f.ResourceName(istNode)),
			}
		}
	}
	return nil
}

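// doesImageStreamExist returns the image stream node referenced by the provided image
// stream tag node, along with whether that image stream was actually found in the graph.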
func doesImageStreamExist(g osgraph.Graph, istag graph.Node) (graph.Node, bool) {
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	for _, imagestream := range g.SuccessorNodesByEdgeKind(istag, imageedges.ReferencedImageStreamImageGraphEdgeKind) {
		return imagestream, imagestream.(*imagegraph.ImageStreamNode).Found()
	}
	return nil, false
}

// FindDeploymentConfigReadinessWarnings inspects deploymentconfigs and reports those that
// don't have readiness probes set up.
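//
// A minimal usage sketch (g, f, and the probe command are assumed to be supplied by the
// surrounding status code; "oc set probe" is only an example value for setProbeCommand):
//
//	for _, marker := range FindDeploymentConfigReadinessWarnings(g, f, "oc set probe") {
//		fmt.Println(marker.Message, marker.Suggestion)
//	}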
func FindDeploymentConfigReadinessWarnings(g osgraph.Graph, f osgraph.Namer, setProbeCommand string) []osgraph.Marker {
	markers := []osgraph.Marker{}

Node:
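	// A container that already defines a readiness probe skips to the next
	// deploymentconfig via `continue Node` in the inner loop below.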
	for _, uncastDcNode := range g.NodesByKind(appsgraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*appsgraph.DeploymentConfigNode)

		if t := dcNode.DeploymentConfig.Spec.Template; t != nil && len(t.Spec.Containers) > 0 {
			for _, container := range t.Spec.Containers {
				if container.ReadinessProbe != nil {
					continue Node
				}
			}

			// All of the containers in the deployment config lack a readiness probe
			markers = append(markers, osgraph.Marker{
				Node:     uncastDcNode,
				Severity: osgraph.InfoSeverity,
				Key:      MissingReadinessProbeWarning,
				Message: fmt.Sprintf("%s has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful.",
					f.ResourceName(dcNode)),
				Suggestion: osgraph.Suggestion(fmt.Sprintf("%s %s --readiness ...", setProbeCommand, f.ResourceName(dcNode))),
			})
			continue Node
		}
	}

	return markers
}

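// FindPersistentVolumeClaimWarnings inspects deploymentconfigs and reports those whose
// persistent volume claims are missing or cannot safely back more than one running pod.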
func FindPersistentVolumeClaimWarnings(g osgraph.Graph, f osgraph.Namer) []osgraph.Marker {
	markers := []osgraph.Marker{}

	for _, uncastDcNode := range g.NodesByKind(appsgraph.DeploymentConfigNodeKind) {
		dcNode := uncastDcNode.(*appsgraph.DeploymentConfigNode)
		marker := pvcMarker(g, f, dcNode)
		if marker != nil {
			markers = append(markers, *marker)
		}
	}

	return markers
}

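// pvcMarker returns a marker for the provided deploymentconfig when one of its claims is
// missing, or when a ReadWriteOnce claim is used by a config that can run more than one
// pod at a time (replicas > 1, or a rolling strategy with maxSurge > 0).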
func pvcMarker(g osgraph.Graph, f osgraph.Namer, dcNode *appsgraph.DeploymentConfigNode) *osgraph.Marker {
	for _, uncastPvcNode := range g.SuccessorNodesByEdgeKind(dcNode, appsedges.VolumeClaimEdgeKind) {
		pvcNode := uncastPvcNode.(*kubegraph.PersistentVolumeClaimNode)

		if !pvcNode.Found() {
			return &osgraph.Marker{
				Node:         dcNode,
				RelatedNodes: []graph.Node{uncastPvcNode},
				Severity:     osgraph.WarningSeverity,
				Key:          MissingPVCWarning,
				Message:      fmt.Sprintf("%s points to a missing persistent volume claim (%s).", f.ResourceName(dcNode), f.ResourceName(pvcNode)),
				// TODO: Suggestion: osgraph.Suggestion(fmt.Sprintf("oc create pvc ...")),
			}
		}

		dc := dcNode.DeploymentConfig
		isBlockedBySize := dc.Spec.Replicas > 1
		isBlockedRolling := false
		rollingParams := dc.Spec.Strategy.RollingParams
		if rollingParams != nil {
			maxSurge, _, _ := kdeplutil.ResolveFenceposts(&rollingParams.MaxSurge, &rollingParams.MaxUnavailable, dc.Spec.Replicas)
			isBlockedRolling = maxSurge > 0
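			// Illustrative: with replicas=3 and maxSurge=25%, the surge value rounds up to 1,
			// so a rolling deployment may briefly run an extra pod alongside the existing ones.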
		}

		// If the claim is not RWO or the deployment will never have more than one pod
		// running at a time, then it should be fine.
		if !hasRWOAccess(pvcNode) || (!isBlockedRolling && !isBlockedBySize) {
			continue
		}

		// This shouldn't be an issue on single-host clusters but they are not the common case anyway.
		// If github.com/kubernetes/kubernetes/issues/26567 ever gets fixed upstream, then we can drop
		// this warning.
		return &osgraph.Marker{
			Node:         dcNode,
			RelatedNodes: []graph.Node{uncastPvcNode},
			Severity:     osgraph.WarningSeverity,
			Key:          SingleHostVolumeWarning,
			Message:      fmt.Sprintf("%s references a volume which may only be used in a single pod at a time - this may lead to hung deployments", f.ResourceName(dcNode)),
		}
	}
	return nil
}

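// hasRWOAccess returns true if the provided claim requests ReadWriteOnce access.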
func hasRWOAccess(pvcNode *kubegraph.PersistentVolumeClaimNode) bool {
	for _, accessMode := range pvcNode.PersistentVolumeClaim.Spec.AccessModes {
		if accessMode == kapi.ReadWriteOnce {
			return true
		}
	}
	return false
}