actuator_reconcile.go
// SPDX-FileCopyrightText: 2024 SAP SE or an SAP affiliate company and Gardener contributors
//
// SPDX-License-Identifier: Apache-2.0

package genericactuator

import (
"context"
"errors"
"fmt"
"strings"
"time"
machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/go-logr/logr"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/utils/ptr"
"sigs.k8s.io/controller-runtime/pkg/client"
extensionscontroller "github.com/gardener/gardener/extensions/pkg/controller"
extensionsworkercontroller "github.com/gardener/gardener/extensions/pkg/controller/worker"
extensionsworkerhelper "github.com/gardener/gardener/extensions/pkg/controller/worker/helper"
v1beta1constants "github.com/gardener/gardener/pkg/apis/core/v1beta1/constants"
v1beta1helper "github.com/gardener/gardener/pkg/apis/core/v1beta1/helper"
extensionsv1alpha1 "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1"
extensionsv1alpha1helper "github.com/gardener/gardener/pkg/apis/extensions/v1alpha1/helper"
"github.com/gardener/gardener/pkg/controllerutils"
gardenerutils "github.com/gardener/gardener/pkg/utils/gardener"
"github.com/gardener/gardener/pkg/utils/kubernetes/health"
retryutils "github.com/gardener/gardener/pkg/utils/retry"
)
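// Reconcile brings the machine classes and machine deployments in the seed cluster in sync with the
// desired state computed from the given Worker resource: it deploys the wanted machine classes and
// machine deployments, waits for the latter to become available, and cleans up resources that are
// no longer needed.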
func (a *genericActuator) Reconcile(ctx context.Context, log logr.Logger, worker *extensionsv1alpha1.Worker, cluster *extensionscontroller.Cluster) error {
log = log.WithValues("operation", "reconcile")
workerDelegate, err := a.delegateFactory.WorkerDelegate(ctx, worker, cluster)
if err != nil {
return fmt.Errorf("could not instantiate actuator context: %w", err)
}
// Call pre reconciliation hook to prepare Worker reconciliation.
if err := workerDelegate.PreReconcileHook(ctx); err != nil {
return fmt.Errorf("pre worker reconciliation hook failed: %w", err)
}
// Get the list of all existing machine deployments.
existingMachineDeployments := &machinev1alpha1.MachineDeploymentList{}
if err := a.seedClient.List(ctx, existingMachineDeployments, client.InNamespace(worker.Namespace)); err != nil {
return err
}
// Generate the desired machine deployments.
log.Info("Generating machine deployments")
wantedMachineDeployments, err := workerDelegate.GenerateMachineDeployments(ctx)
if err != nil {
return fmt.Errorf("failed to generate the machine deployments: %w", err)
}
var (
clusterAutoscalerUsed = extensionsv1alpha1helper.ClusterAutoscalerRequired(worker.Spec.Pools)
isHibernationEnabled = extensionscontroller.IsHibernationEnabled(cluster)
)
// Get list of existing machine class names
existingMachineClassNames, err := a.listMachineClassNames(ctx, worker.Namespace)
if err != nil {
return err
}
// Deploy generated machine classes.
log.Info("Deploying machine classes")
if err := workerDelegate.DeployMachineClasses(ctx); err != nil {
return fmt.Errorf("failed to deploy the machine classes: %w", err)
}
// Update the machine images in the worker provider status.
if err := workerDelegate.UpdateMachineImagesStatus(ctx); err != nil {
return fmt.Errorf("failed to update the machine image status: %w", err)
}
existingMachineDeploymentNames := sets.Set[string]{}
for _, deployment := range existingMachineDeployments.Items {
existingMachineDeploymentNames.Insert(deployment.Name)
}
// Generate the machine deployment configurations based on the previously computed list of deployments and deploy them.
if err := deployMachineDeployments(ctx, log, a.seedClient, cluster, worker, existingMachineDeployments, wantedMachineDeployments, clusterAutoscalerUsed); err != nil {
return fmt.Errorf("failed to generate the machine deployment config: %w", err)
}
// Update machineDeploymentsLastUpdateTime and the machine deployments slice in the Worker status.
if err := a.updateWorkerStatusMachineDeployments(ctx, worker, wantedMachineDeployments); err != nil {
return fmt.Errorf("failed to update the machine deployments in worker status: %w", err)
}
// Wait until all generated machine deployments are healthy/available.
if err := a.waitUntilWantedMachineDeploymentsAvailable(ctx, log, cluster, worker, existingMachineDeploymentNames, existingMachineClassNames, wantedMachineDeployments); err != nil {
// Check if the machine-controller-manager is stuck.
isStuck, msg, err2 := a.IsMachineControllerStuck(ctx, worker)
if err2 != nil {
log.Error(err2, "Failed to check if the machine-controller-manager pod is stuck after unsuccessfully waiting for all machine deployments to be ready")
// continue in order to return `err` and determine error codes
}
if isStuck {
podList := corev1.PodList{}
if err2 := a.seedClient.List(ctx, &podList, client.InNamespace(worker.Namespace), client.MatchingLabels{"role": "machine-controller-manager"}); err2 != nil {
return fmt.Errorf("failed to list machine-controller-manager pods for worker (%s/%s): %w", worker.Namespace, worker.Name, err2)
}
for _, pod := range podList.Items {
if err2 := a.seedClient.Delete(ctx, &pod); err2 != nil {
return fmt.Errorf("failed to delete stuck machine-controller-manager pod for worker (%s/%s): %w", worker.Namespace, worker.Name, err2)
}
}
log.Info("Successfully deleted stuck machine-controller-manager pod", "reason", msg)
}
newError := fmt.Errorf("failed while waiting for all machine deployments to be ready: %w", err)
if a.errorCodeCheckFunc != nil {
return v1beta1helper.NewErrorWithCodes(newError, a.errorCodeCheckFunc(err)...)
}
return newError
}
// Delete all old machine deployments (i.e. those which were not previously computed but exist in the cluster).
if err := a.cleanupMachineDeployments(ctx, log, existingMachineDeployments, wantedMachineDeployments); err != nil {
return fmt.Errorf("failed to cleanup the machine deployments: %w", err)
}
// Delete all old machine classes (i.e. those which were not previously computed but exist in the cluster).
if err := a.cleanupMachineClasses(ctx, log, worker.Namespace, wantedMachineDeployments); err != nil {
return fmt.Errorf("failed to cleanup the machine classes: %w", err)
}
// Delete all old machine class secrets (i.e. those which were not previously computed but exist in the cluster).
if err := a.cleanupMachineClassSecrets(ctx, log, worker.Namespace, wantedMachineDeployments); err != nil {
return fmt.Errorf("failed to cleanup the orphaned machine class secrets: %w", err)
}
deployment := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: v1beta1constants.DeploymentNameMachineControllerManager, Namespace: worker.Namespace}}
if err := a.seedClient.Get(ctx, client.ObjectKeyFromObject(deployment), deployment); client.IgnoreNotFound(err) != nil {
return fmt.Errorf("failed reading deployment %s: %w", client.ObjectKeyFromObject(deployment), err)
}
if ptr.Deref(deployment.Spec.Replicas, 0) > 0 {
// Wait until all unwanted machine deployments are deleted from the system.
if err := a.waitUntilUnwantedMachineDeploymentsDeleted(ctx, log, worker, wantedMachineDeployments); err != nil {
return fmt.Errorf("error while waiting for all undesired machine deployments to be deleted: %w", err)
}
}
// Delete all MachineSets whose desired and actual replica counts are both zero.
if err := a.cleanupMachineSets(ctx, log, worker.Namespace); err != nil {
return fmt.Errorf("failed to cleanup the machine sets: %w", err)
}
// Scale down machine-controller-manager if shoot is hibernated.
if isHibernationEnabled {
if err := scaleMachineControllerManager(ctx, log, a.seedClient, worker, 0); err != nil {
return err
}
}
// Call post reconciliation hook after Worker reconciliation has happened.
if err := workerDelegate.PostReconcileHook(ctx); err != nil {
return fmt.Errorf("post worker reconciliation hook failed: %w", err)
}
return nil
}
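// deployMachineDeployments creates or updates the wanted machine deployments. The replica count of
// each deployment is derived from the hibernation state, whether the cluster autoscaler is used,
// a potential restoration state, and the replicas of the already existing deployment (if any).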
func deployMachineDeployments(
ctx context.Context,
log logr.Logger,
cl client.Client,
cluster *extensionscontroller.Cluster,
worker *extensionsv1alpha1.Worker,
existingMachineDeployments *machinev1alpha1.MachineDeploymentList,
wantedMachineDeployments extensionsworkercontroller.MachineDeployments,
clusterAutoscalerUsed bool,
) error {
log.Info("Deploying machine deployments")
for _, deployment := range wantedMachineDeployments {
var (
labels = map[string]string{"name": deployment.Name}
existingMachineDeployment = getExistingMachineDeployment(existingMachineDeployments, deployment.Name)
replicas int32
)
switch {
// If the Shoot is hibernated then the machine deployment's replicas should be zero.
// Also mark all machines for forceful deletion so that PDBs/SLAs are not honored during cluster hibernation.
case extensionscontroller.IsHibernationEnabled(cluster):
replicas = 0
if err := markAllMachinesForcefulDeletion(ctx, log, cl, worker.Namespace); err != nil {
return fmt.Errorf("marking all machines for forceful deletion failed: %w", err)
}
// If the cluster autoscaler is not enabled then min=max (as per API validation), hence
// we can use either min or max.
case !clusterAutoscalerUsed:
replicas = deployment.Minimum
// If the machine deployment does not yet exist we set replicas to min so that the cluster
// autoscaler can scale them as required.
case existingMachineDeployment == nil:
if deployment.State != nil {
// If the wanted deployment has no corresponding existing deployment but carries a State, then
// we are in the restoration process; the actual replica count is stored in State.Replicas.
replicas = deployment.State.Replicas
} else {
replicas = deployment.Minimum
}
// If the Shoot was hibernated and is now woken up we set replicas to min so that the cluster
// autoscaler can scale them as required.
case shootIsAwake(extensionscontroller.IsHibernationEnabled(cluster), existingMachineDeployments):
replicas = deployment.Minimum
// If the shoot worker pool minimum was updated and if the current machine deployment replica
// count is less than minimum, we update the machine deployment replica count to updated minimum.
case existingMachineDeployment.Spec.Replicas < deployment.Minimum:
replicas = deployment.Minimum
// If the shoot worker pool maximum was updated and if the current machine deployment replica
// count is greater than maximum, we update the machine deployment replica count to updated maximum.
case existingMachineDeployment.Spec.Replicas > deployment.Maximum:
replicas = deployment.Maximum
// In this case the machine deployment must exist (otherwise the above case was already true),
// and the cluster autoscaler must be enabled. We do not want to override the machine deployment's
// replicas as the cluster autoscaler is responsible for setting appropriate values.
default:
replicas = getDeploymentSpecReplicas(existingMachineDeployments, deployment.Name)
if replicas == -1 {
replicas = deployment.Minimum
}
}
machineDeployment := &machinev1alpha1.MachineDeployment{
ObjectMeta: metav1.ObjectMeta{
Name: deployment.Name,
Namespace: worker.Namespace,
},
}
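// Create the machine deployment if it does not exist yet; otherwise merge-patch it to match the desired spec.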
if _, err := controllerutils.GetAndCreateOrMergePatch(ctx, cl, machineDeployment, func() error {
for k, v := range deployment.ClusterAutoscalerAnnotations {
metav1.SetMetaDataAnnotation(&machineDeployment.ObjectMeta, k, v)
}
machineDeployment.Spec = machinev1alpha1.MachineDeploymentSpec{
Replicas: replicas,
MinReadySeconds: 500,
Strategy: machinev1alpha1.MachineDeploymentStrategy{
Type: machinev1alpha1.RollingUpdateMachineDeploymentStrategyType,
RollingUpdate: &machinev1alpha1.RollingUpdateMachineDeployment{
MaxSurge: &deployment.MaxSurge,
MaxUnavailable: &deployment.MaxUnavailable,
},
},
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Template: machinev1alpha1.MachineTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: machinev1alpha1.MachineSpec{
Class: machinev1alpha1.ClassSpec{
Kind: "MachineClass",
Name: deployment.ClassName,
},
NodeTemplateSpec: machinev1alpha1.NodeTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: deployment.Annotations,
Labels: deployment.Labels,
},
Spec: corev1.NodeSpec{
Taints: deployment.Taints,
},
},
MachineConfiguration: deployment.MachineConfiguration,
},
},
}
log.Info("Deploying machine deployment", "machineDeploymentName", machineDeployment.Name, "replicas", machineDeployment.Spec.Replicas)
return nil
}); err != nil {
return err
}
}
return nil
}

// waitUntilWantedMachineDeploymentsAvailable waits until all the desired <machineDeployments> have been
// marked as healthy/available by the machine-controller-manager. It polls the status every 5 seconds.
func (a *genericActuator) waitUntilWantedMachineDeploymentsAvailable(ctx context.Context, log logr.Logger, cluster *extensionscontroller.Cluster, worker *extensionsv1alpha1.Worker, alreadyExistingMachineDeploymentNames sets.Set[string], alreadyExistingMachineClassNames sets.Set[string], wantedMachineDeployments extensionsworkercontroller.MachineDeployments) error {
log.Info("Waiting until wanted machine deployments are available")
return retryutils.UntilTimeout(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
var numHealthyDeployments, numUpdated, numAvailable, numUnavailable, numDesired, numberOfAwakeMachines int32
// Get the list of all machine deployments
machineDeployments := &machinev1alpha1.MachineDeploymentList{}
if err := a.seedClient.List(ctx, machineDeployments, client.InNamespace(worker.Namespace)); err != nil {
return retryutils.SevereError(err)
}
// Get the list of all machine sets
machineSets := &machinev1alpha1.MachineSetList{}
if err := a.seedClient.List(ctx, machineSets, client.InNamespace(worker.Namespace)); err != nil {
return retryutils.SevereError(err)
}
// Map each owner reference to its machine sets.
ownerReferenceToMachineSet := gardenerutils.BuildOwnerToMachineSetsMap(machineSets.Items)
// Collect the numbers of available and desired replicas.
for _, deployment := range machineDeployments.Items {
wantedDeployment := wantedMachineDeployments.FindByName(deployment.Name)
// Filter out all machine deployments that are not desired (any more).
if wantedDeployment == nil {
continue
}
// We want to wait until all wanted machine deployments have as many
// available replicas as desired (specified in the .spec.replicas).
// However, if we see any error in the status of the deployment then we return it.
if machineErrs := extensionsworkerhelper.ReportFailedMachines(deployment.Status); machineErrs != nil {
return retryutils.SevereError(machineErrs)
}
numberOfAwakeMachines += deployment.Status.Replicas
// Skip further checks if the cluster is hibernated because the machine-controller-manager is usually scaled down during hibernation.
if extensionscontroller.IsHibernationEnabled(cluster) {
continue
}
// We only care about rolling updates when the cluster is not hibernated;
// when hibernated, we just wait until the sum of `.Status.Replicas` over all machine deployments equals 0.
machineSets := ownerReferenceToMachineSet[deployment.Name]
// use `wanted deployment` for these checks, as the existing deployments can be based on an outdated cache
alreadyExistingMachineDeployment := alreadyExistingMachineDeploymentNames.Has(wantedDeployment.Name)
newMachineClass := !alreadyExistingMachineClassNames.Has(wantedDeployment.ClassName)
if alreadyExistingMachineDeployment && newMachineClass {
log.Info("Machine deployment is performing a rolling update", "machineDeployment", &deployment)
// An already existing machine deployment undergoing a rolling update should have more than one machine set
if len(machineSets) <= 1 {
err := fmt.Errorf("waiting for the machine-controller-manager to create the machine sets for the machine deployment (%s/%s)", deployment.Namespace, deployment.Name)
log.Error(err, "Minor error while waiting for wanted MachineDeployments to become available")
return retryutils.MinorError(err)
}
}
// If the Shoot is not hibernated we want to make sure that the machine set with the right
// machine class for the machine deployment is deployed by the machine-controller-manager
if machineSet := extensionsworkerhelper.GetMachineSetWithMachineClass(wantedDeployment.Name, wantedDeployment.ClassName, ownerReferenceToMachineSet); machineSet == nil {
return retryutils.MinorError(fmt.Errorf("waiting for the machine-controller-manager to create the updated machine set for the machine deployment (%s/%s)", deployment.Namespace, deployment.Name))
}
// If the Shoot is not hibernated we want to wait until all wanted machine deployments have as many
// available replicas as desired (specified in the .spec.replicas).
if health.CheckMachineDeployment(&deployment) == nil {
numHealthyDeployments++
}
numDesired += deployment.Spec.Replicas
numUpdated += deployment.Status.UpdatedReplicas
numAvailable += deployment.Status.AvailableReplicas
numUnavailable += deployment.Status.UnavailableReplicas
}
var msg string
switch {
case !extensionscontroller.IsHibernationEnabled(cluster):
// numUpdated == numberOfAwakeMachines waits until the old machine is deleted in the case of a rolling update with maxUnavailable = 0
// numUnavailable == 0 makes sure that every machine joined the cluster (during creation & in the case of a rolling update with maxUnavailable > 0)
if numUnavailable == 0 && numUpdated == numberOfAwakeMachines && int(numHealthyDeployments) == len(wantedMachineDeployments) {
return retryutils.Ok()
}
if numUnavailable == 0 && numAvailable == numDesired && numUpdated < numberOfAwakeMachines {
msg = fmt.Sprintf("Waiting until all old machines are drained and terminated. Waiting for %d machine(s)...", numberOfAwakeMachines-numUpdated)
break
}
msg = fmt.Sprintf("Waiting until machines are available (%d/%d desired machine(s) available, %d/%d machine(s) updated, %d machine(s) pending, %d/%d machinedeployments available)...", numAvailable, numDesired, numUpdated, numDesired, numUnavailable, numHealthyDeployments, len(wantedMachineDeployments))
default:
if numberOfAwakeMachines == 0 {
return retryutils.Ok()
}
msg = fmt.Sprintf("Waiting until all machines have been hibernated (%d still awake)...", numberOfAwakeMachines)
}
// TODO: Rework logging in this method. There should be one proper log message per MachineDeployment and one aggregated error to return.
// Currently, later MachineDeployments override earlier messages, logs are not structured, etc.
log.Info(msg) //nolint:logcheck
return retryutils.MinorError(errors.New(msg))
})
}

// waitUntilUnwantedMachineDeploymentsDeleted waits until all the undesired <machineDeployments> are deleted from the
// system. It polls the status every 5 seconds.
func (a *genericActuator) waitUntilUnwantedMachineDeploymentsDeleted(ctx context.Context, log logr.Logger, worker *extensionsv1alpha1.Worker, wantedMachineDeployments extensionsworkercontroller.MachineDeployments) error {
log.Info("Waiting until unwanted machine deployments are deleted")
return retryutils.UntilTimeout(ctx, 5*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
existingMachineDeployments := &machinev1alpha1.MachineDeploymentList{}
if err := a.seedClient.List(ctx, existingMachineDeployments, client.InNamespace(worker.Namespace)); err != nil {
return retryutils.SevereError(err)
}
for _, existingMachineDeployment := range existingMachineDeployments.Items {
if !wantedMachineDeployments.HasDeployment(existingMachineDeployment.Name) {
for _, failedMachine := range existingMachineDeployment.Status.FailedMachines {
return retryutils.SevereError(fmt.Errorf("machine %s failed: %s", failedMachine.Name, failedMachine.LastOperation.Description))
}
log.Info("Waiting until unwanted machine deployment is deleted", "machineDeployment", &existingMachineDeployment)
return retryutils.MinorError(fmt.Errorf("at least one unwanted machine deployment (%s) still exists", existingMachineDeployment.Name))
}
}
return retryutils.Ok()
})
}
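// updateWorkerStatusMachineDeployments patches the Worker status with the given machine deployments
// (name, minimum, maximum) and records the time of this update in MachineDeploymentsLastUpdateTime.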
func (a *genericActuator) updateWorkerStatusMachineDeployments(ctx context.Context, worker *extensionsv1alpha1.Worker, machineDeployments extensionsworkercontroller.MachineDeployments) error {
if len(machineDeployments) == 0 {
return nil
}
var statusMachineDeployments []extensionsv1alpha1.MachineDeployment
for _, machineDeployment := range machineDeployments {
statusMachineDeployments = append(statusMachineDeployments, extensionsv1alpha1.MachineDeployment{
Name: machineDeployment.Name,
Minimum: machineDeployment.Minimum,
Maximum: machineDeployment.Maximum,
})
}
updateTime := metav1.Now()
patch := client.MergeFrom(worker.DeepCopy())
worker.Status.MachineDeployments = statusMachineDeployments
worker.Status.MachineDeploymentsLastUpdateTime = &updateTime
return a.seedClient.Status().Patch(ctx, worker, patch)
}

// Helper functions
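// shootIsAwake returns true if the shoot is not hibernated while all existing machine deployments
// still have zero replicas, i.e. the shoot is just waking up from hibernation.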
func shootIsAwake(isHibernated bool, existingMachineDeployments *machinev1alpha1.MachineDeploymentList) bool {
if isHibernated {
return false
}
for _, existingMachineDeployment := range existingMachineDeployments.Items {
if existingMachineDeployment.Spec.Replicas != 0 {
return false
}
}
return true
}
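// getDeploymentSpecReplicas returns the .spec.replicas of the existing machine deployment with the
// given name, or -1 if no such deployment exists.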
func getDeploymentSpecReplicas(existingMachineDeployments *machinev1alpha1.MachineDeploymentList, name string) int32 {
for _, existingMachineDeployment := range existingMachineDeployments.Items {
if existingMachineDeployment.Name == name {
return existingMachineDeployment.Spec.Replicas
}
}
return -1
}
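// getExistingMachineDeployment returns the existing machine deployment with the given name, or nil
// if it does not exist.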
func getExistingMachineDeployment(existingMachineDeployments *machinev1alpha1.MachineDeploymentList, name string) *machinev1alpha1.MachineDeployment {
for _, machineDeployment := range existingMachineDeployments.Items {
if machineDeployment.Name == name {
return &machineDeployment
}
}
return nil
}

// ReadMachineConfiguration reads the machine-controller-manager settings from the given worker pool
// and returns the corresponding MachineConfiguration for the machine deployment.
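//
// A minimal usage sketch (hypothetical values; assumes the pool's MachineControllerManagerSettings
// field uses the Gardener core v1beta1 settings type):
//
//	pool := extensionsv1alpha1.WorkerPool{
//		MachineControllerManagerSettings: &gardencorev1beta1.MachineControllerManagerSettings{
//			MachineDrainTimeout: &metav1.Duration{Duration: 2 * time.Hour},
//			NodeConditions:      []string{"ReadonlyFilesystem", "DiskPressure"},
//		},
//	}
//	config := ReadMachineConfiguration(pool)
//	// config.MachineDrainTimeout is 2h, *config.NodeConditions is
//	// "ReadonlyFilesystem,DiskPressure", and all unset settings remain nil.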
func ReadMachineConfiguration(pool extensionsv1alpha1.WorkerPool) *machinev1alpha1.MachineConfiguration {
machineConfiguration := &machinev1alpha1.MachineConfiguration{}
poolSettings := pool.MachineControllerManagerSettings
if poolSettings != nil {
if poolSettings.MachineDrainTimeout != nil {
machineConfiguration.MachineDrainTimeout = poolSettings.MachineDrainTimeout
}
if poolSettings.MachineHealthTimeout != nil {
machineConfiguration.MachineHealthTimeout = poolSettings.MachineHealthTimeout
}
if poolSettings.MachineCreationTimeout != nil {
machineConfiguration.MachineCreationTimeout = poolSettings.MachineCreationTimeout
}
if poolSettings.MaxEvictRetries != nil {
machineConfiguration.MaxEvictRetries = poolSettings.MaxEvictRetries
}
if len(poolSettings.NodeConditions) > 0 {
nodeConditions := strings.Join(poolSettings.NodeConditions, ",")
machineConfiguration.NodeConditions = &nodeConditions
}
}
return machineConfiguration
}