// Copyright 2019-2020 Authors of Cilium
// Copyright 2017 Lyft, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ipam
import (
"context"
"fmt"
"time"
"github.com/cilium/cilium/pkg/defaults"
ipamTypes "github.com/cilium/cilium/pkg/ipam/types"
v2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/math"
"github.com/cilium/cilium/pkg/trigger"
"github.com/sirupsen/logrus"
)
const (
// warningInterval is the interval for warnings which should be done
// once and then repeated if the warning persists.
warningInterval = time.Hour
)
// Node represents a Kubernetes node running Cilium with an associated
// CiliumNode custom resource
type Node struct {
// mutex protects all members of this structure
mutex lock.RWMutex
// name is the name of the node
name string
// resource is the link to the CiliumNode custom resource
resource *v2.CiliumNode
// stats provides accounting for various per node statistics
stats Statistics
// lastMaxAdapterWarning is the timestamp when the last warning was
// printed that this node is out of adapters
lastMaxAdapterWarning time.Time
// instanceRunning is true when the EC2 instance backing the node is
// running. This state is detected based on error messages returned
// when modifying instance state
instanceRunning bool
// instanceStoppedRunning records when an instance was most recently set to not running
instanceStoppedRunning time.Time
// waitingForPoolMaintenance is true when the node is subject to an
// IP allocation or release which must be performed before another
// allocation or release can be attempted
waitingForPoolMaintenance bool
// resyncNeeded is set to the current time when a resync with the EC2
// API is required. The timestamp is required to ensure that this is
// only reset if the resync started after the time stored in
// resyncNeeded. This is needed because resyncs and allocations happen
// in parallel.
resyncNeeded time.Time
// available is the map of IPs available to this node
available ipamTypes.AllocationMap
// manager is the NodeManager responsible for this node
manager *NodeManager
// poolMaintainer is the trigger used to assign/unassign
// private IP addresses of this node.
// It ensures that multiple requests to operate private IPs are
// batched together if pool maintenance is still ongoing.
poolMaintainer *trigger.Trigger
// k8sSync is the trigger used to synchronize node information with the
// K8s apiserver. The trigger is used to batch multiple updates
// together if the apiserver is slow to respond or subject to rate
// limiting.
k8sSync *trigger.Trigger
// ops is the IPAM implementation used for this node
ops NodeOperations
// retry is the trigger used to retry pool maintenance while the
// instances API is unstable
retry *trigger.Trigger
}
// Statistics represent the IP allocation statistics of a node
type Statistics struct {
// UsedIPs is the number of IPs currently in use
UsedIPs int
// AvailableIPs is the number of IPs currently available for allocation
// by the node
AvailableIPs int
// NeededIPs is the number of IPs needed to reach the PreAllocate
// watermark
NeededIPs int
// ExcessIPs is the number of free IPs exceeding MaxAboveWatermark
ExcessIPs int
// RemainingInterfaces is the number of interfaces that can either be
// allocated or have not yet exhausted the instance specific quota of
// addresses
RemainingInterfaces int
}
// IsRunning returns true if the node is considered to be running
func (n *Node) IsRunning() bool {
n.mutex.RLock()
defer n.mutex.RUnlock()
return n.instanceRunning
}
func (n *Node) SetRunning(running bool) {
n.mutex.Lock()
defer n.mutex.Unlock()
n.loggerLocked().Infof("Set running %t", running)
n.instanceRunning = running
if !n.instanceRunning {
n.instanceStoppedRunning = time.Now()
}
}
// Stats returns a copy of the node statistics
func (n *Node) Stats() Statistics {
n.mutex.RLock()
c := n.stats
n.mutex.RUnlock()
return c
}
// Ops returns the IPAM implementation operations for the node
func (n *Node) Ops() NodeOperations {
return n.ops
}
func (n *Node) logger() *logrus.Entry {
if n == nil {
return log
}
n.mutex.RLock()
defer n.mutex.RUnlock()
return n.loggerLocked()
}
func (n *Node) loggerLocked() (logger *logrus.Entry) {
logger = log
if n != nil {
logger = logger.WithField(fieldName, n.name)
if n.resource != nil {
logger = logger.WithField("instanceID", n.resource.InstanceID())
}
}
return
}
// getMaxAboveWatermark returns the max-above-watermark setting for an AWS node
//
// n.mutex must be held when calling this function
func (n *Node) getMaxAboveWatermark() int {
if n.resource.Spec.IPAM.MaxAboveWatermark != 0 {
return n.resource.Spec.IPAM.MaxAboveWatermark
}
// OBSOLETE: This can be removed in Cilium 1.9
return n.resource.Spec.ENI.MaxAboveWatermark
}
// getPreAllocate returns the pre-allocation setting for an AWS node
//
// n.mutex must be held when calling this function
func (n *Node) getPreAllocate() int {
if n.resource.Spec.IPAM.PreAllocate != 0 {
return n.resource.Spec.IPAM.PreAllocate
}
// OBSOLETE: This can be removed in Cilium 1.9
if n.resource.Spec.ENI.PreAllocate != 0 {
return n.resource.Spec.ENI.PreAllocate
}
return defaults.IPAMPreAllocation
}
// getMinAllocate returns the minimum-allocation setting of an AWS node
//
// n.mutex must be held when calling this function
func (n *Node) getMinAllocate() int {
if n.resource.Spec.IPAM.MinAllocate != 0 {
return n.resource.Spec.IPAM.MinAllocate
}
// OBSOLETE: This can be removed in Cilium 1.9
return n.resource.Spec.ENI.MinAllocate
}
// getMaxAllocate returns the maximum-allocation setting of an AWS node
func (n *Node) getMaxAllocate() int {
instanceMax := n.ops.GetMaximumAllocatableIPv4()
if n.resource.Spec.IPAM.MaxAllocate > 0 {
if n.resource.Spec.IPAM.MaxAllocate > instanceMax {
n.loggerLocked().Warningf("max-allocate (%d) is higher than the instance type limits (%d)", n.resource.Spec.IPAM.MaxAllocate, instanceMax)
}
return n.resource.Spec.IPAM.MaxAllocate
}
return instanceMax
}
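// Example (hypothetical figures, for illustration only): with an
// instance-type limit of 14 addresses and Spec.IPAM.MaxAllocate set to
// 20, the spec value of 20 is returned but a warning is logged; with
// MaxAllocate unset, the instance limit of 14 is used.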
// GetNeededAddresses returns the number of needed addresses that need to be
// allocated or released. A positive number is returned to indicate allocation.
// A negative number is returned to indicate release of addresses.
func (n *Node) GetNeededAddresses() int {
stats := n.Stats()
if stats.NeededIPs > 0 {
return stats.NeededIPs
}
if n.manager.releaseExcessIPs && stats.ExcessIPs > 0 {
// Nodes are sorted by needed addresses, return negative values of excessIPs
// so that nodes with IP deficit are resolved first
return stats.ExcessIPs * -1
}
return 0
}
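// Worked example (hypothetical figures): with stats.NeededIPs=4 the
// method returns 4, requesting allocation; with stats.NeededIPs=0,
// stats.ExcessIPs=3 and excess-IP release enabled, it returns -3,
// requesting the release of three addresses.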
func calculateNeededIPs(availableIPs, usedIPs, preAllocate, minAllocate, maxAllocate int) (neededIPs int) {
neededIPs = preAllocate - (availableIPs - usedIPs)
if minAllocate > 0 {
neededIPs = math.IntMax(neededIPs, minAllocate-availableIPs)
}
// If maxAllocate is set (> 0) and neededIPs is higher than the
// maxAllocate value, we only return the amount of IPs that can
// still be allocated
if maxAllocate > 0 && (availableIPs+neededIPs) > maxAllocate {
neededIPs = maxAllocate - availableIPs
}
if neededIPs < 0 {
neededIPs = 0
}
return
}
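// Worked example (hypothetical figures, not from a real node): with
// availableIPs=10, usedIPs=8, preAllocate=8, minAllocate=12 and
// maxAllocate=14, the pre-allocate deficit is 8-(10-8)=6, the
// min-allocate floor of 12-10=2 does not raise it, and the max-allocate
// ceiling caps it at 14-10=4, so 4 is returned.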
func calculateExcessIPs(availableIPs, usedIPs, preAllocate, minAllocate, maxAboveWatermark int) (excessIPs int) {
// Keep availableIPs above minAllocate + maxAboveWatermark as long as
// the initial bucket of min-allocate + max-above-watermark has not
// been used up yet. This is the maximum potential allocation that will
// happen on initial bootstrap. Depending on interface restrictions,
// the actual allocation may be below this but we always want to avoid
// releasing IPs that have just been allocated.
if usedIPs <= (minAllocate + maxAboveWatermark) && availableIPs <= (minAllocate + maxAboveWatermark) {
return 0
}
// Once above the minimum allocation level, calculate based on
// pre-allocation limit with the max-above-watermark limit calculated
// in. This is again a best-effort calculation, depending on the
// interface restrictions, less than max-above-watermark may have been
// allocated but we never want to release IPs that have been allocated
// because of max-above-watermark.
excessIPs = availableIPs - usedIPs - preAllocate - maxAboveWatermark
if excessIPs < 0 {
excessIPs = 0
}
return
}
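// Worked example (hypothetical figures): with availableIPs=20, usedIPs=4,
// preAllocate=8, minAllocate=10 and maxAboveWatermark=2, the node is past
// the bootstrap bucket (availableIPs=20 > 10+2), so the excess is
// 20-4-8-2=6 and six IPs become candidates for release.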
func (n *Node) requirePoolMaintenance() {
n.mutex.Lock()
n.waitingForPoolMaintenance = true
n.mutex.Unlock()
}
func (n *Node) poolMaintenanceComplete() {
n.mutex.Lock()
n.waitingForPoolMaintenance = false
n.mutex.Unlock()
}
// InstanceID returns the instance ID of the node
func (n *Node) InstanceID() (id string) {
n.mutex.RLock()
if n.resource != nil {
id = n.resource.InstanceID()
}
n.mutex.RUnlock()
return
}
// UpdatedResource is called when an update to the CiliumNode has been
// received. The IPAM layer will attempt to immediately resolve any IP deficits
// and also trigger the background sync to continue working in the background
// to resolve any deficits or excess.
func (n *Node) UpdatedResource(resource *v2.CiliumNode) bool {
// Deep copy the resource before storing it so that the caller's later
// use of the resource cannot affect the stored copy.
resource = resource.DeepCopy()
n.ops.UpdatedNode(resource)
n.mutex.Lock()
// Any modification to the custom resource is seen as a sign that the
// instance is alive
n.instanceRunning = true
n.resource = resource
n.mutex.Unlock()
n.recalculate()
allocationNeeded := n.allocationNeeded()
if allocationNeeded {
n.requirePoolMaintenance()
n.poolMaintainer.Trigger()
}
return allocationNeeded
}
func (n *Node) resourceAttached() (attached bool) {
n.mutex.RLock()
attached = n.resource != nil
n.mutex.RUnlock()
return
}
func (n *Node) recalculate() {
// Skip any recalculation if the CiliumNode resource does not exist yet
if !n.resourceAttached() {
return
}
scopedLog := n.logger()
a, err := n.ops.ResyncInterfacesAndIPs(context.TODO(), scopedLog)
n.mutex.Lock()
defer n.mutex.Unlock()
if err != nil {
scopedLog.Warning("Instance not found! Please delete corresponding ciliumnode if instance has already been deleted.")
// Avoid any further action
n.stats.NeededIPs = 0
n.stats.ExcessIPs = 0
return
}
n.available = a
n.stats.UsedIPs = len(n.resource.Status.IPAM.Used)
n.stats.AvailableIPs = len(n.available)
n.stats.NeededIPs = calculateNeededIPs(n.stats.AvailableIPs, n.stats.UsedIPs, n.getPreAllocate(), n.getMinAllocate(), n.getMaxAllocate())
n.stats.ExcessIPs = calculateExcessIPs(n.stats.AvailableIPs, n.stats.UsedIPs, n.getPreAllocate(), n.getMinAllocate(), n.getMaxAboveWatermark())
scopedLog.WithFields(logrus.Fields{
"available": n.stats.AvailableIPs,
"used": n.stats.UsedIPs,
"toAlloc": n.stats.NeededIPs,
"toRelease": n.stats.ExcessIPs,
"waitingForPoolMaintenance": n.waitingForPoolMaintenance,
"resyncNeeded": n.resyncNeeded,
}).Debug("Recalculated needed addresses")
}
// allocationNeeded returns true if this node requires IPs to be allocated
func (n *Node) allocationNeeded() (needed bool) {
n.mutex.RLock()
needed = !n.waitingForPoolMaintenance && n.resyncNeeded.IsZero() && n.stats.NeededIPs > 0
n.mutex.RUnlock()
return
}
// releaseNeeded returns true if this node requires IPs to be released
func (n *Node) releaseNeeded() (needed bool) {
n.mutex.RLock()
needed = n.manager.releaseExcessIPs && !n.waitingForPoolMaintenance && n.resyncNeeded.IsZero() && n.stats.ExcessIPs > 0
n.mutex.RUnlock()
return
}
// Pool returns the IP allocation pool available to the node
func (n *Node) Pool() (pool ipamTypes.AllocationMap) {
pool = ipamTypes.AllocationMap{}
n.mutex.RLock()
for k, allocationIP := range n.available {
pool[k] = allocationIP
}
n.mutex.RUnlock()
return
}
// ResourceCopy returns a deep copy of the CiliumNode custom resource
// associated with the node
func (n *Node) ResourceCopy() *v2.CiliumNode {
n.mutex.RLock()
defer n.mutex.RUnlock()
return n.resource.DeepCopy()
}
// createInterface creates an additional interface and attaches it to the
// instance as specified by the CiliumNode. Up to neededAddresses
// secondary IPs are assigned to the interface, limited by the maximum
// number of addresses allowed by the instance.
func (n *Node) createInterface(ctx context.Context, a *AllocationAction) error {
if a.AvailableInterfaces == 0 {
// This is not a failure scenario, warn once per hour but do
// not track as interface allocation failure. There is a
// separate metric to track nodes running at capacity.
n.mutex.Lock()
if time.Since(n.lastMaxAdapterWarning) > warningInterval {
n.loggerLocked().Warning("Instance is out of interfaces")
n.lastMaxAdapterWarning = time.Now()
}
n.mutex.Unlock()
return nil
}
scopedLog := n.logger()
toAllocate, errCondition, err := n.ops.CreateInterface(ctx, a, scopedLog)
if err != nil {
scopedLog.Warningf("Unable to create interface on instance: %s", err)
n.manager.metricsAPI.IncAllocationAttempt(errCondition, string(a.PoolID))
return err
}
n.manager.metricsAPI.IncAllocationAttempt("success", string(a.PoolID))
n.manager.metricsAPI.AddIPAllocation(string(a.PoolID), int64(toAllocate))
return nil
}
// AllocationAction is the action to be taken to resolve allocation deficits
// for a particular node. It is returned by
// NodeOperations.PrepareIPAllocation() and passed into
// NodeOperations.AllocateIPs().
type AllocationAction struct {
// InterfaceID is set to the identifier describing the interface on
// which the IPs must be allocated. This is optional, an IPAM
// implementation can leave this empty to indicate that no interface
// context is needed or a new interface must be created.
InterfaceID string
// Interface is the interface to allocate IPs on
Interface ipamTypes.InterfaceRevision
// PoolID is the IPAM pool identifier to allocate the IPs from. This
// can correspond to a subnet ID or it can also be left blank or set to
// a value such as "global" to indicate a single address pool.
PoolID ipamTypes.PoolID
// AvailableForAllocation is the number of IPs available for allocation.
// If InterfaceID is set, then this number corresponds to the number of
// IPs available for allocation on that interface. This number may be
// lower than the number of IPs required to resolve the deficit.
AvailableForAllocation int
// MaxIPsToAllocate is set by the core IPAM layer before
// NodeOperations.AllocateIPs() is called and defines the maximum
// number of IPs to allocate in order to stay within the boundaries as
// defined by NodeOperations.{ MinAllocate() | PreAllocate() |
// getMaxAboveWatermark() }.
MaxIPsToAllocate int
// AvailableInterfaces is the number of interfaces available to be created
AvailableInterfaces int
}
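// As an illustration only (identifiers and figures are hypothetical, not
// from any real IPAM backend), a PrepareIPAllocation implementation might
// return an action shaped like:
//
//	&AllocationAction{
//		InterfaceID:            "intf-1",   // existing interface with spare capacity
//		PoolID:                 "subnet-1", // pool backing that interface
//		AvailableForAllocation: 5,          // free addresses left on the interface
//		AvailableInterfaces:    2,          // further interfaces that could be created
//	}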
// ReleaseAction is the action to be taken to resolve allocation excess for a
// particular node. It is returned by NodeOperations.PrepareIPRelease() and
// passed into NodeOperations.ReleaseIPs().
type ReleaseAction struct {
// InterfaceID is set to the identifier describing the interface on
// which the IPs must be released. This is optional, an IPAM
// implementation can leave this empty to indicate that no interface
// context is needed.
InterfaceID string
// PoolID is the IPAM pool identifier to release the IPs from. This can
// correspond to a subnet ID or it can also be left blank or set to a
// value such as "global" to indicate a single address pool.
PoolID ipamTypes.PoolID
// IPsToRelease is the list of IPs to release
IPsToRelease []string
}
// maintenanceAction represents the resources available for allocation for a
// particular ciliumNode. If an existing interface has IP allocation capacity
// left, that capacity is used up first. If not, an available index is found to
// create a new interface.
type maintenanceAction struct {
allocation *AllocationAction
release *ReleaseAction
}
func (n *Node) determineMaintenanceAction() (*maintenanceAction, error) {
var err error
a := &maintenanceAction{}
scopedLog := n.logger()
stats := n.Stats()
// Validate that the node still requires addresses to be released; the
// request may have been resolved in the meantime.
if n.manager.releaseExcessIPs && stats.ExcessIPs > 0 {
a.release = n.ops.PrepareIPRelease(stats.ExcessIPs, scopedLog)
scopedLog = scopedLog.WithFields(logrus.Fields{
"available": stats.AvailableIPs,
"used": stats.UsedIPs,
"excess": stats.ExcessIPs,
"releasing": a.release.IPsToRelease,
"selectedInterface": a.release.InterfaceID,
"selectedPoolID": a.release.PoolID,
})
scopedLog.Info("Releasing excess IPs from node")
return a, nil
}
// Validate that the node still requires addresses to be allocated; the
// request may have been resolved in the meantime.
if stats.NeededIPs == 0 {
return nil, nil
}
a.allocation, err = n.ops.PrepareIPAllocation(scopedLog)
if err != nil {
return nil, err
}
if a.allocation != nil {
n.mutex.Lock()
// PrepareIPAllocation may return a nil action without an error;
// only dereference the action inside the nil check.
a.allocation.MaxIPsToAllocate = stats.NeededIPs + n.getMaxAboveWatermark()
n.stats.RemainingInterfaces = a.allocation.AvailableInterfaces
stats = n.stats
n.mutex.Unlock()
scopedLog = scopedLog.WithFields(logrus.Fields{
"selectedInterface": a.allocation.InterfaceID,
"selectedPoolID": a.allocation.PoolID,
"maxIPsToAllocate": a.allocation.MaxIPsToAllocate,
"availableForAllocation": a.allocation.AvailableForAllocation,
"availableInterfaces": a.allocation.AvailableInterfaces,
})
}
scopedLog.WithFields(logrus.Fields{
"available": stats.AvailableIPs,
"used": stats.UsedIPs,
"neededIPs": stats.NeededIPs,
"remainingInterfaces": stats.RemainingInterfaces,
}).Info("Resolving IP deficit of node")
return a, nil
}
// maintainIPPool attempts to allocate or release all required IPs to fulfill
// the needed gap.
func (n *Node) maintainIPPool(ctx context.Context) error {
a, err := n.determineMaintenanceAction()
if err != nil {
return err
}
// Maintenance request has already been fulfilled
if a == nil {
return nil
}
scopedLog := n.logger()
// Release excess addresses
if a.release != nil && len(a.release.IPsToRelease) > 0 {
err := n.ops.ReleaseIPs(ctx, a.release)
if err == nil {
n.manager.metricsAPI.AddIPRelease(string(a.release.PoolID), int64(len(a.release.IPsToRelease)))
return nil
}
n.manager.metricsAPI.IncAllocationAttempt("ip unassignment failed", string(a.release.PoolID))
scopedLog.WithFields(logrus.Fields{
"selectedInterface": a.release.InterfaceID,
"releasingAddresses": len(a.release.IPsToRelease),
}).WithError(err).Warning("Unable to unassign IPs from interface")
return err
}
if a.allocation == nil {
scopedLog.Debug("No allocation action required")
return nil
}
// Assign needed addresses
if a.allocation.AvailableForAllocation > 0 {
a.allocation.AvailableForAllocation = math.IntMin(a.allocation.AvailableForAllocation, a.allocation.MaxIPsToAllocate)
err := n.ops.AllocateIPs(ctx, a.allocation)
if err == nil {
n.manager.metricsAPI.IncAllocationAttempt("success", string(a.allocation.PoolID))
n.manager.metricsAPI.AddIPAllocation(string(a.allocation.PoolID), int64(a.allocation.AvailableForAllocation))
return nil
}
n.manager.metricsAPI.IncAllocationAttempt("ip assignment failed", string(a.allocation.PoolID))
scopedLog.WithFields(logrus.Fields{
"selectedInterface": a.allocation.InterfaceID,
"ipsToAllocate": a.allocation.AvailableForAllocation,
}).WithError(err).Warning("Unable to assign additional IPs to interface, will create new interface")
}
return n.createInterface(ctx, a.allocation)
}
func (n *Node) isInstanceRunning() (isRunning bool) {
n.mutex.RLock()
isRunning = n.instanceRunning
n.mutex.RUnlock()
return
}
func (n *Node) requireResync() {
n.mutex.Lock()
n.resyncNeeded = time.Now()
n.mutex.Unlock()
}
func (n *Node) updateLastResync(syncTime time.Time) {
n.mutex.Lock()
if syncTime.After(n.resyncNeeded) {
n.loggerLocked().Debug("Resetting resyncNeeded")
n.resyncNeeded = time.Time{}
}
n.mutex.Unlock()
}
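// Illustrative timeline (hypothetical instants): requireResync() at t=10
// sets resyncNeeded=10. A resync that started at t=9 and finishes later
// calls updateLastResync(t=9) and does not clear the flag, since it
// cannot have observed the state that triggered the request; a resync
// started at t=11 clears it.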
// MaintainIPPool attempts to allocate or release all required IPs to fulfill
// the needed gap. If required, interfaces are created.
func (n *Node) MaintainIPPool(ctx context.Context) error {
// As long as the instances API is unstable, don't perform any
// operation that can mutate state.
if !n.manager.InstancesAPIIsReady() {
if n.retry != nil {
n.retry.Trigger()
}
return fmt.Errorf("instances API is unstable. Blocking mutating operations. See logs for details.")
}
// If the instance stopped running less than a minute ago, don't
// attempt any deficit resolution and wait for the custom resource to
// be updated as a sign of life.
if !n.isInstanceRunning() && n.instanceStoppedRunning.Add(time.Minute).After(time.Now()) {
return nil
}
err := n.maintainIPPool(ctx)
if err == nil {
n.logger().Debug("Setting resync needed")
n.requireResync()
}
n.poolMaintenanceComplete()
n.recalculate()
n.manager.resyncTrigger.Trigger()
return err
}
// syncToAPIServer is called to synchronize the node content with the custom
// resource in the apiserver.
func (n *Node) syncToAPIServer() (err error) {
scopedLog := n.logger()
scopedLog.Debug("Refreshing node")
node := n.ResourceCopy()
// n.resource may not have been assigned yet
if node == nil {
return
}
origNode := node.DeepCopy()
// Always update the status first to ensure that the IPAM information
// is synced for all addresses that are marked as available.
//
// Two attempts are made in case the local resource is outdated. If the
// second attempt fails as well we are likely under heavy contention,
// fall back to the controller based background interval to retry.
var updatedNode *v2.CiliumNode
for retry := 0; retry < 2; retry++ {
if node.Status.IPAM.Used == nil {
node.Status.IPAM.Used = ipamTypes.AllocationMap{}
}
var updateErr error
n.ops.PopulateStatusFields(node)
updatedNode, updateErr = n.manager.k8sAPI.UpdateStatus(origNode, node)
if updatedNode != nil && updatedNode.Name != "" {
node = updatedNode.DeepCopy()
if updateErr == nil {
break
}
} else if updateErr != nil {
scopedLog.WithError(updateErr).WithFields(logrus.Fields{
logfields.Attempt: retry,
}).Warning("Failed to update CiliumNode status")
node, err = n.manager.k8sAPI.Get(node.Name)
if err != nil {
break
} else {
// Propagate the error in case we exit the loop without
// succeeding in updating the status.
err = updateErr
}
node = node.DeepCopy()
origNode = node.DeepCopy()
} else /* updateErr == nil */ {
err = updateErr
break
}
}
if err != nil {
scopedLog.WithError(err).Warning("Unable to update CiliumNode status")
return err
}
for retry := 0; retry < 2; retry++ {
if node.Spec.IPAM.Pool == nil {
node.Spec.IPAM.Pool = ipamTypes.AllocationMap{}
}
node.Spec.IPAM.Pool = n.Pool()
scopedLog.WithField("poolSize", len(node.Spec.IPAM.Pool)).Debug("Updating node in apiserver")
if node.Spec.IPAM.PreAllocate == 0 {
node.Spec.IPAM.PreAllocate = defaults.IPAMPreAllocation
}
updatedNode, err = n.manager.k8sAPI.Update(origNode, node)
if updatedNode != nil && updatedNode.Name != "" {
node = updatedNode.DeepCopy()
if err == nil {
break
}
} else if err != nil {
node, err = n.manager.k8sAPI.Get(node.Name)
if err != nil {
break
}
node = node.DeepCopy()
origNode = node.DeepCopy()
} else {
break
}
}
if err != nil {
scopedLog.WithError(err).Warning("Unable to update CiliumNode spec")
}
return err
}