-
Notifications
You must be signed in to change notification settings - Fork 1.3k
/
machine_controller_phases.go
313 lines (266 loc) · 12.1 KB
/
machine_controller_phases.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"time"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
"sigs.k8s.io/cluster-api/controllers/external"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
utilconversion "sigs.k8s.io/cluster-api/util/conversion"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
)
var (
	// externalReadyWait is how long to wait before requeueing when an
	// externally-managed object (bootstrap config or infrastructure machine)
	// referenced by a Machine is not found or not yet ready.
	externalReadyWait = 30 * time.Second
)
// reconcilePhase derives Status.Phase from the Machine's current status fields
// and bumps Status.LastUpdated whenever the phase changes.
func (r *MachineReconciler) reconcilePhase(_ context.Context, m *clusterv1.Machine) {
	originalPhase := m.Status.Phase // nolint:ifshort

	// Evaluate conditions from highest to lowest priority; exactly one branch
	// fires. This mirrors the previous "last matching write wins" cascade.
	switch {
	case !m.DeletionTimestamp.IsZero():
		// A deletion timestamp always takes precedence.
		m.Status.SetTypedPhase(clusterv1.MachinePhaseDeleting)
	case m.Status.FailureReason != nil || m.Status.FailureMessage != nil:
		// Any terminal failure reported on the status wins over progress states.
		m.Status.SetTypedPhase(clusterv1.MachinePhaseFailed)
	case m.Status.NodeRef != nil && m.Status.InfrastructureReady:
		// Node exists and infrastructure is ready: the machine is running.
		m.Status.SetTypedPhase(clusterv1.MachinePhaseRunning)
	case m.Spec.ProviderID != nil:
		// A provider ID means the infrastructure has been provisioned.
		m.Status.SetTypedPhase(clusterv1.MachinePhaseProvisioned)
	case m.Status.BootstrapReady && !m.Status.InfrastructureReady:
		// Bootstrap done but infrastructure still pending: provisioning.
		m.Status.SetTypedPhase(clusterv1.MachinePhaseProvisioning)
	case m.Status.Phase == "":
		// Nothing known yet: start out as "pending".
		m.Status.SetTypedPhase(clusterv1.MachinePhasePending)
	}

	// Record the time of the phase transition, if any.
	if m.Status.Phase != originalPhase {
		now := metav1.Now()
		m.Status.LastUpdated = &now
	}
}
// reconcileExternal handles generic unstructured objects referenced by a Machine.
//
// It fetches the referenced object, reconciles ownership/labels on it, patches it
// back, registers a watch for it, and mirrors any failure reason/message from the
// object onto the Machine's status. The returned ReconcileOutput carries the
// fetched object, a RequeueAfter when the object is not found yet, or Paused when
// reconciliation should be skipped.
func (r *MachineReconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine, ref *corev1.ObjectReference) (external.ReconcileOutput, error) {
	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)

	// Upgrade the reference's apiVersion to the latest contract version served
	// by the cluster, if needed, before fetching.
	if err := utilconversion.ConvertReferenceAPIContract(ctx, r.Client, r.restConfig, ref); err != nil {
		return external.ReconcileOutput{}, err
	}
	obj, err := external.Get(ctx, r.Client, ref, m.Namespace)
	if err != nil {
		// Not found is not terminal: the external object may simply not have
		// been created yet, so ask the caller to requeue.
		if apierrors.IsNotFound(errors.Cause(err)) {
			log.Info("could not find external ref, requeueing", "RefGVK", ref.GroupVersionKind(), "RefName", ref.Name, "Machine", m.Name, "Namespace", m.Namespace)
			return external.ReconcileOutput{RequeueAfter: externalReadyWait}, nil
		}
		return external.ReconcileOutput{}, err
	}

	// if external ref is paused, return early without touching the object.
	if annotations.IsPaused(cluster, obj) {
		log.V(3).Info("External object referenced is paused")
		return external.ReconcileOutput{Paused: true}, nil
	}

	// Initialize the patch helper so all mutations below are applied in a
	// single Patch call at the end.
	patchHelper, err := patch.NewHelper(obj, r.Client)
	if err != nil {
		return external.ReconcileOutput{}, err
	}

	// With the migration from v1alpha2 to v1alpha3, Machine controllers should be the owner for the
	// infra Machines, hence remove any existing machineset controller owner reference
	if controller := metav1.GetControllerOf(obj); controller != nil && controller.Kind == "MachineSet" {
		gv, err := schema.ParseGroupVersion(controller.APIVersion)
		if err != nil {
			return external.ReconcileOutput{}, err
		}
		// Only strip owner references that belong to the cluster-api group;
		// leave foreign MachineSet-like owners untouched.
		if gv.Group == clusterv1.GroupVersion.Group {
			ownerRefs := util.RemoveOwnerRef(obj.GetOwnerReferences(), *controller)
			obj.SetOwnerReferences(ownerRefs)
		}
	}

	// Set external object ControllerReference to the Machine.
	if err := controllerutil.SetControllerReference(m, obj, r.Client.Scheme()); err != nil {
		return external.ReconcileOutput{}, err
	}

	// Set the Cluster label on the external object so it can be selected by cluster name.
	labels := obj.GetLabels()
	if labels == nil {
		labels = make(map[string]string)
	}
	labels[clusterv1.ClusterLabelName] = m.Spec.ClusterName
	obj.SetLabels(labels)

	// Always attempt to Patch the external object, persisting the owner-ref
	// and label changes made above.
	if err := patchHelper.Patch(ctx, obj); err != nil {
		return external.ReconcileOutput{}, err
	}

	// Ensure we add a watcher to the external object so changes to it trigger
	// Machine reconciliation.
	if err := r.externalTracker.Watch(log, obj, &handler.EnqueueRequestForOwner{OwnerType: &clusterv1.Machine{}}); err != nil {
		return external.ReconcileOutput{}, err
	}

	// Set failure reason and message, if any, mirroring them from the external
	// object's status onto the Machine.
	failureReason, failureMessage, err := external.FailuresFrom(obj)
	if err != nil {
		return external.ReconcileOutput{}, err
	}
	if failureReason != "" {
		machineStatusError := capierrors.MachineStatusError(failureReason)
		m.Status.FailureReason = &machineStatusError
	}
	if failureMessage != "" {
		m.Status.FailureMessage = pointer.StringPtr(
			fmt.Sprintf("Failure detected from referenced resource %v with name %q: %s",
				obj.GroupVersionKind(), obj.GetName(), failureMessage),
		)
	}

	return external.ReconcileOutput{Result: obj}, nil
}
// reconcileBootstrap reconciles the Spec.Bootstrap.ConfigRef object on a Machine.
//
// It marks the Machine bootstrap-ready as soon as a data secret name is known,
// otherwise drives the referenced bootstrap config through reconcileExternal,
// mirrors its readiness condition, and copies status.dataSecretName back onto
// the Machine once the provider reports ready.
func (r *MachineReconciler) reconcileBootstrap(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)

	// If the bootstrap data is populated, set ready and return.
	if m.Spec.Bootstrap.DataSecretName != nil {
		m.Status.BootstrapReady = true
		conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition)
		return ctrl.Result{}, nil
	}

	// If the Bootstrap ref is nil (and so the machine should use user generated data secret), return.
	if m.Spec.Bootstrap.ConfigRef == nil {
		return ctrl.Result{}, nil
	}

	// Call generic external reconciler if we have an external reference.
	externalResult, err := r.reconcileExternal(ctx, cluster, m, m.Spec.Bootstrap.ConfigRef)
	if err != nil {
		return ctrl.Result{}, err
	}
	// Bootstrap config not found yet: propagate the requeue interval.
	if externalResult.RequeueAfter > 0 {
		return ctrl.Result{RequeueAfter: externalResult.RequeueAfter}, nil
	}
	// Bootstrap config is paused: skip further processing.
	if externalResult.Paused {
		return ctrl.Result{}, nil
	}
	bootstrapConfig := externalResult.Result

	// If the bootstrap config is being deleted, return early.
	if !bootstrapConfig.GetDeletionTimestamp().IsZero() {
		return ctrl.Result{}, nil
	}

	// Determine if the bootstrap provider is ready.
	ready, err := external.IsReady(bootstrapConfig)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Report a summary of current status of the bootstrap object defined for this machine.
	conditions.SetMirror(m, clusterv1.BootstrapReadyCondition,
		conditions.UnstructuredGetter(bootstrapConfig),
		conditions.WithFallbackValue(ready, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
	)

	// If the bootstrap provider is not ready, requeue.
	if !ready {
		log.Info("Bootstrap provider is not ready, requeuing")
		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
	}

	// Get and set the name of the secret containing the bootstrap data.
	// An empty name at this point is a provider contract violation, since the
	// provider reported ready above.
	secretName, _, err := unstructured.NestedString(bootstrapConfig.Object, "status", "dataSecretName")
	if err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve dataSecretName from bootstrap provider for Machine %q in namespace %q", m.Name, m.Namespace)
	} else if secretName == "" {
		return ctrl.Result{}, errors.Errorf("retrieved empty dataSecretName from bootstrap provider for Machine %q in namespace %q", m.Name, m.Namespace)
	}
	m.Spec.Bootstrap.DataSecretName = pointer.StringPtr(secretName)
	m.Status.BootstrapReady = true
	return ctrl.Result{}, nil
}
// reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Machine.
//
// It drives the referenced infrastructure object through reconcileExternal,
// mirrors its readiness condition, and — once the provider reports ready —
// copies spec.providerID, status.addresses and spec.failureDomain from the
// infrastructure object onto the Machine. If the infrastructure object goes
// missing after the Machine was already infrastructure-ready, the Machine is
// moved to a terminal failed state.
func (r *MachineReconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx, "cluster", cluster.Name)

	// Call generic external reconciler.
	infraReconcileResult, err := r.reconcileExternal(ctx, cluster, m, &m.Spec.InfrastructureRef)
	if err != nil {
		return ctrl.Result{}, err
	}
	if infraReconcileResult.RequeueAfter > 0 {
		// Infra object went missing after the machine was up and running
		if m.Status.InfrastructureReady {
			// Build the error first so the log line carries a real error; the
			// previous code passed the nil `err` from the check above to
			// log.Error here.
			err := errors.Errorf("could not find %v %q for Machine %q in namespace %q, requeueing", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace)
			log.Error(err, "Machine infrastructure reference has been deleted after being ready, setting failure state")
			m.Status.FailureReason = capierrors.MachineStatusErrorPtr(capierrors.InvalidConfigurationMachineError)
			m.Status.FailureMessage = pointer.StringPtr(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready",
				m.Spec.InfrastructureRef.GroupVersionKind(), m.Spec.InfrastructureRef.Name))
			return ctrl.Result{}, err
		}
		return ctrl.Result{RequeueAfter: infraReconcileResult.RequeueAfter}, nil
	}
	// if the external object is paused, return without any further processing
	if infraReconcileResult.Paused {
		return ctrl.Result{}, nil
	}
	infraConfig := infraReconcileResult.Result

	// If the infrastructure object is being deleted, return early.
	if !infraConfig.GetDeletionTimestamp().IsZero() {
		return ctrl.Result{}, nil
	}

	// Determine if the infrastructure provider is ready.
	ready, err := external.IsReady(infraConfig)
	if err != nil {
		return ctrl.Result{}, err
	}
	m.Status.InfrastructureReady = ready

	// Report a summary of current status of the infrastructure object defined for this machine.
	conditions.SetMirror(m, clusterv1.InfrastructureReadyCondition,
		conditions.UnstructuredGetter(infraConfig),
		conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
	)

	// If the infrastructure provider is not ready, return early.
	if !ready {
		log.Info("Infrastructure provider is not ready, requeuing")
		return ctrl.Result{RequeueAfter: externalReadyWait}, nil
	}

	// Get Spec.ProviderID from the infrastructure provider. An empty value at
	// this point is a provider contract violation, since ready was reported.
	var providerID string
	if err := util.UnstructuredUnmarshalField(infraConfig, &providerID, "spec", "providerID"); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve Spec.ProviderID from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace)
	} else if providerID == "" {
		return ctrl.Result{}, errors.Errorf("retrieved empty Spec.ProviderID from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace)
	}

	// Get and set Status.Addresses from the infrastructure provider.
	// Addresses are optional, so a missing field is tolerated.
	err = util.UnstructuredUnmarshalField(infraConfig, &m.Status.Addresses, "status", "addresses")
	if err != nil && err != util.ErrUnstructuredFieldNotFound {
		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve addresses from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace)
	}

	// Get and set the failure domain from the infrastructure provider.
	// The failure domain is optional as well.
	var failureDomain string
	err = util.UnstructuredUnmarshalField(infraConfig, &failureDomain, "spec", "failureDomain")
	switch {
	case err == util.ErrUnstructuredFieldNotFound: // no-op
	case err != nil:
		// Fixed message: was "failed to failure domain ..." (missing "retrieve").
		return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve failure domain from infrastructure provider for Machine %q in namespace %q", m.Name, m.Namespace)
	default:
		m.Spec.FailureDomain = pointer.StringPtr(failureDomain)
	}

	m.Spec.ProviderID = pointer.StringPtr(providerID)
	return ctrl.Result{}, nil
}