/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/klog/v2"

	apiservice "k8s.io/kubernetes/pkg/api/service"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/core/validation"
	"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
	"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
	netutils "k8s.io/utils/net"
)

// Allocators encapsulates the various allocators (IPs, ports) used in
// Services.
type Allocators struct {
	serviceIPAllocatorsByFamily map[api.IPFamily]ipallocator.Interface
	defaultServiceIPFamily      api.IPFamily // --service-cluster-ip-range[0]
	serviceNodePorts            portallocator.Interface
}

// ServiceNodePort includes protocol and port number of a service NodePort.
type ServiceNodePort struct {
	// The IP protocol for this port. Supports "TCP" and "UDP".
	Protocol api.Protocol

	// The port on each node on which this service is exposed.
	// Default is to auto-allocate a port if the ServiceType of this Service requires one.
	NodePort int32
}

// makeAlloc is a transitional function to facilitate service REST flattening.
func makeAlloc(defaultFamily api.IPFamily, ipAllocs map[api.IPFamily]ipallocator.Interface, portAlloc portallocator.Interface) Allocators {
	return Allocators{
		defaultServiceIPFamily:      defaultFamily,
		serviceIPAllocatorsByFamily: ipAllocs,
		serviceNodePorts:            portAlloc,
	}
}

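// allocateCreate allocates ClusterIPs and NodePorts for a Service that is
// being created, returning a transaction that commits or reverts all of the
// allocations as a unit.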
func (al *Allocators) allocateCreate(service *api.Service, dryRun bool) (transaction, error) {
	result := metaTransaction{}
	success := false
	defer func() {
		if !success {
			result.Revert()
		}
	}()

	// Ensure IP family fields are correctly initialized. We do it here, since
	// we want this to be visible even when dryRun == true.
	if err := al.initIPFamilyFields(After{service}, Before{nil}); err != nil {
		return nil, err
	}

	// Allocate ClusterIPs
	//TODO(thockin): validation should not pass with empty clusterIP, but it
	//does (and is tested!). Fixing that all is a big PR and will have to
	//happen later.
	if txn, err := al.txnAllocClusterIPs(service, dryRun); err != nil {
		return nil, err
	} else {
		result = append(result, txn)
	}

	// Allocate ports
	if txn, err := al.txnAllocNodePorts(service, dryRun); err != nil {
		return nil, err
	} else {
		result = append(result, txn)
	}

	success = true
	return result, nil
}

// attempts to default service ip families according to cluster configuration
// while ensuring that provided families are configured on cluster.
func (al *Allocators) initIPFamilyFields(after After, before Before) error {
	oldService, service := before.Service, after.Service

	// Nothing to do for ExternalName services.
	if service.Spec.Type == api.ServiceTypeExternalName {
		return nil
	}

	// We don't want to auto-upgrade (add an IP) or downgrade (remove an IP)
	// PreferDualStack services following a cluster change to/from
	// dual-stackness.
	//
	// That means a PreferDualStack service will only be upgraded/downgraded
	// when:
	// - changing ipFamilyPolicy to "RequireDualStack" or "SingleStack" AND
	// - adding or removing a secondary clusterIP or ipFamily
	if isMatchingPreferDualStackClusterIPFields(after, before) {
		return nil // nothing more to do.
	}

	// If the user didn't specify ipFamilyPolicy, we can infer a default. We
	// don't want a static default because we want to make sure that we never
	// change between single- and dual-stack modes with explicit direction, as
	// provided by ipFamilyPolicy. Consider these cases:
	// * Create (POST): If they didn't specify a policy we can assume it's
	//   always SingleStack.
	// * Update (PUT): If they didn't specify a policy we need to adopt the
	//   policy from before. This is better than always assuming SingleStack
	//   because a PUT that changes clusterIPs from 2 to 1 value but doesn't
	//   specify ipFamily would work.
	// * Update (PATCH): If they didn't specify a policy it will adopt the
	//   policy from before.
	if service.Spec.IPFamilyPolicy == nil {
		if oldService != nil && oldService.Spec.IPFamilyPolicy != nil {
			// Update from an object with policy, use the old policy
			service.Spec.IPFamilyPolicy = oldService.Spec.IPFamilyPolicy
		} else if service.Spec.ClusterIP == api.ClusterIPNone && len(service.Spec.Selector) == 0 {
			// Special-case: headless + selectorless defaults to dual.
			requireDualStack := api.IPFamilyPolicyRequireDualStack
			service.Spec.IPFamilyPolicy = &requireDualStack
		} else {
			// create or update from an object without policy (e.g.
			// ExternalName) to one that needs policy
			singleStack := api.IPFamilyPolicySingleStack
			service.Spec.IPFamilyPolicy = &singleStack
		}
	}
	// Henceforth we can assume ipFamilyPolicy is set.

	// Do some loose pre-validation of the input. This makes it easier in the
	// rest of allocation code to not have to consider corner cases.
	// TODO(thockin): when we tighten validation (e.g. to require IPs) we will
	// need a "strict" and a "loose" form of this.
	if el := validation.ValidateServiceClusterIPsRelatedFields(service); len(el) != 0 {
		return errors.NewInvalid(api.Kind("Service"), service.Name, el)
	}

	//TODO(thockin): Move this logic to validation?
	el := make(field.ErrorList, 0)

	// Update-only prep work.
	if oldService != nil {
		if getIPFamilyPolicy(service) == api.IPFamilyPolicySingleStack {
			// As long as ClusterIPs and IPFamilies have not changed, setting
			// the policy to single-stack is clear intent.
			// ClusterIPs[0] is immutable, so it is safe to keep.
			if sameClusterIPs(oldService, service) && len(service.Spec.ClusterIPs) > 1 {
				service.Spec.ClusterIPs = service.Spec.ClusterIPs[0:1]
			}
			if sameIPFamilies(oldService, service) && len(service.Spec.IPFamilies) > 1 {
				service.Spec.IPFamilies = service.Spec.IPFamilies[0:1]
			}
		} else {
			// If the policy is anything but single-stack AND they reduced these
			// fields, it's an error. They need to specify policy.
			if reducedClusterIPs(After{service}, Before{oldService}) {
				el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy,
					"must be 'SingleStack' to release the secondary cluster IP"))
			}
			if reducedIPFamilies(After{service}, Before{oldService}) {
				el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy,
					"must be 'SingleStack' to release the secondary IP family"))
			}
		}
	}
	// Make sure ipFamilyPolicy makes sense for the provided ipFamilies and
	// clusterIPs. Further checks happen below - after the special cases.
	if getIPFamilyPolicy(service) == api.IPFamilyPolicySingleStack {
		if len(service.Spec.ClusterIPs) == 2 {
			el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy,
				"must be 'RequireDualStack' or 'PreferDualStack' when multiple cluster IPs are specified"))
		}
		if len(service.Spec.IPFamilies) == 2 {
			el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy,
				"must be 'RequireDualStack' or 'PreferDualStack' when multiple IP families are specified"))
		}
	}

	// Infer IPFamilies[] from ClusterIPs[]. Further checks happen below,
	// after the special cases.
	for i, ip := range service.Spec.ClusterIPs {
		if ip == api.ClusterIPNone {
			break
		}

		// We previously validated that IPs are well-formed and that if an
		// ipFamilies[] entry exists it matches the IP.
		fam := familyOf(ip)

		// If the corresponding family is not specified, add it.
		if i >= len(service.Spec.IPFamilies) {
			// Families are checked more later, but this is a better error in
			// this specific case (indicating the user-provided IP, rather
			// than the auto-assigned family).
			if _, found := al.serviceIPAllocatorsByFamily[fam]; !found {
				el = append(el, field.Invalid(field.NewPath("spec", "clusterIPs").Index(i), service.Spec.ClusterIPs,
					fmt.Sprintf("%s is not configured on this cluster", fam)))
			} else {
				// OK to infer.
				service.Spec.IPFamilies = append(service.Spec.IPFamilies, fam)
			}
		}
	}

	// If we have validation errors, bail out now so we don't make them worse.
	if len(el) > 0 {
		return errors.NewInvalid(api.Kind("Service"), service.Name, el)
	}
	// Special-case: headless + selectorless. This has to happen before other
	// checks because it explicitly allows combinations of inputs that would
	// otherwise be errors.
	if service.Spec.ClusterIP == api.ClusterIPNone && len(service.Spec.Selector) == 0 {
		// If IPFamilies was not set by the user, start with the default
		// family.
		if len(service.Spec.IPFamilies) == 0 {
			service.Spec.IPFamilies = []api.IPFamily{al.defaultServiceIPFamily}
		}

		// This follows the logic for headful services, with one exception:
		// on a single-stack cluster the user is allowed to create a headless
		// service that has multiple families, and validation allows it.
		if len(service.Spec.IPFamilies) < 2 {
			if *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack {
				// add the alt ipfamily
				if service.Spec.IPFamilies[0] == api.IPv4Protocol {
					service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv6Protocol)
				} else {
					service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv4Protocol)
				}
			}
		}

		// nothing more needed here
		return nil
	}
	//
	// Everything below this MUST happen *after* the above special cases.
	//

	// Demanding dual-stack on a non dual-stack cluster.
	if getIPFamilyPolicy(service) == api.IPFamilyPolicyRequireDualStack {
		if len(al.serviceIPAllocatorsByFamily) < 2 {
			el = append(el, field.Invalid(field.NewPath("spec", "ipFamilyPolicy"), service.Spec.IPFamilyPolicy,
				"this cluster is not configured for dual-stack services"))
		}
	}

	// If there is a family requested then it has to be configured on cluster.
	for i, ipFamily := range service.Spec.IPFamilies {
		if _, found := al.serviceIPAllocatorsByFamily[ipFamily]; !found {
			el = append(el, field.Invalid(field.NewPath("spec", "ipFamilies").Index(i), ipFamily, "not configured on this cluster"))
		}
	}

	// If we have validation errors, don't bother with the rest.
	if len(el) > 0 {
		return errors.NewInvalid(api.Kind("Service"), service.Name, el)
	}

	// nil families get the cluster default.
	if len(service.Spec.IPFamilies) == 0 {
		service.Spec.IPFamilies = []api.IPFamily{al.defaultServiceIPFamily}
	}

	// If this service is looking for dual-stack and this cluster does have two
	// families, append the missing family.
	if *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicySingleStack &&
		len(service.Spec.IPFamilies) == 1 &&
		len(al.serviceIPAllocatorsByFamily) == 2 {
		if service.Spec.IPFamilies[0] == api.IPv4Protocol {
			service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv6Protocol)
		} else if service.Spec.IPFamilies[0] == api.IPv6Protocol {
			service.Spec.IPFamilies = append(service.Spec.IPFamilies, api.IPv4Protocol)
		}
	}

	return nil
}

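// txnAllocClusterIPs allocates ClusterIPs for the service and wraps them in
// a transaction whose revert callback releases them again.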
func (al *Allocators) txnAllocClusterIPs(service *api.Service, dryRun bool) (transaction, error) {
	// clusterIPs that were allocated may need to be released in case of
	// failure at a higher level.
	allocated, err := al.allocClusterIPs(service, dryRun)
	if err != nil {
		return nil, err
	}

	txn := callbackTransaction{
		revert: func() {
			if dryRun {
				return
			}
			actuallyReleased, err := al.releaseIPs(allocated)
			if err != nil {
				klog.ErrorS(err, "failed to clean up after failed service create",
					"service", klog.KObj(service),
					"shouldRelease", allocated,
					"released", actuallyReleased)
			}
		},
		commit: func() {
			if !dryRun {
				if len(allocated) > 0 {
					klog.V(0).InfoS("allocated clusterIPs",
						"service", klog.KObj(service),
						"clusterIPs", allocated)
				}
			}
		},
	}
	return txn, nil
}

// allocClusterIPs allocates ClusterIPs for a service.
func (al *Allocators) allocClusterIPs(service *api.Service, dryRun bool) (map[api.IPFamily]string, error) {
	// ExternalName services don't get ClusterIPs.
	if service.Spec.Type == api.ServiceTypeExternalName {
		return nil, nil
	}

	// Headless services don't get ClusterIPs.
	if len(service.Spec.ClusterIPs) > 0 && service.Spec.ClusterIPs[0] == api.ClusterIPNone {
		return nil, nil
	}

	toAlloc := make(map[api.IPFamily]string)
	// At this stage, the only fact we know is that the service has the
	// correct IP families assigned to it, and that this cluster is
	// configured for those families. It may have only some of its ClusterIPs
	// assigned (an upgrade to dual-stack), or none at all. The loop below
	// fixes that up.

	// if there is no slice to work with
	if service.Spec.ClusterIPs == nil {
		service.Spec.ClusterIPs = make([]string, 0, len(service.Spec.IPFamilies))
	}

	for i, ipFamily := range service.Spec.IPFamilies {
		if i > (len(service.Spec.ClusterIPs) - 1) {
			service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* just a marker */)
		}

		toAlloc[ipFamily] = service.Spec.ClusterIPs[i]
	}

	// allocate
	allocated, err := al.allocIPs(service, toAlloc, dryRun)

	// set if successful
	if err == nil {
		for family, ip := range allocated {
			for i, check := range service.Spec.IPFamilies {
				if family == check {
					service.Spec.ClusterIPs[i] = ip
					// We technically don't need to set ClusterIP here, but
					// the REST testing code does not go through conversion
					// (which would set it) while it does go through
					// validation, and it expects the field to be set when it
					// patches fields. So set it here as well.
					if i == 0 {
						service.Spec.ClusterIP = ip
					}
				}
			}
		}
	}

	return allocated, err
}

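// allocIPs allocates one IP per requested family, honoring any specific IPs
// already named in toAlloc, and returns the IPs actually allocated so the
// caller can release them on failure.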
func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]string, dryRun bool) (map[api.IPFamily]string, error) {
	allocated := make(map[api.IPFamily]string)

	for family, ip := range toAlloc {
		allocator := al.serviceIPAllocatorsByFamily[family] // should always be there, as we pre validate
		if dryRun {
			allocator = allocator.DryRun()
		}
		if ip == "" {
			allocatedIP, err := allocator.AllocateNext()
			if err != nil {
				return allocated, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
			}
			allocated[family] = allocatedIP.String()
		} else {
			parsedIP := netutils.ParseIPSloppy(ip)
			if parsedIP == nil {
				return allocated, errors.NewInternalError(fmt.Errorf("failed to parse service IP %q", ip))
			}
			if err := allocator.Allocate(parsedIP); err != nil {
				el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))}
				return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el)
			}
			allocated[family] = ip
		}
	}
	return allocated, nil
}

// releaseIPs releases clusterIPs per family.
func (al *Allocators) releaseIPs(toRelease map[api.IPFamily]string) (map[api.IPFamily]string, error) {
	if toRelease == nil {
		return nil, nil
	}

	released := make(map[api.IPFamily]string)
	for family, ip := range toRelease {
		allocator, ok := al.serviceIPAllocatorsByFamily[family]
		if !ok {
			// Maybe the cluster was previously configured for dual-stack,
			// then switched to single-stack?
			klog.V(0).Infof("not releasing ClusterIP %q because %s is not enabled", ip, family)
			continue
		}

		parsedIP := netutils.ParseIPSloppy(ip)
		if parsedIP == nil {
			return released, errors.NewInternalError(fmt.Errorf("failed to parse service IP %q", ip))
		}
		if err := allocator.Release(parsedIP); err != nil {
			return released, err
		}
		released[family] = ip
	}
	return released, nil
}

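// txnAllocNodePorts allocates NodePorts (and the health check node port, if
// needed) for a new service and wraps the port operation in a transaction.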
func (al *Allocators) txnAllocNodePorts(service *api.Service, dryRun bool) (transaction, error) {
	// The allocator tracks dry-run-ness internally.
	nodePortOp := portallocator.StartOperation(al.serviceNodePorts, dryRun)

	txn := callbackTransaction{
		commit: func() {
			nodePortOp.Commit()
			// We don't NEED to call Finish() here, but that package says to,
			// so for future-safety, we will.
			nodePortOp.Finish()
		},
		revert: func() {
			// Weirdly named, but this will revert if commit wasn't called.
			nodePortOp.Finish()
		},
	}

	// Allocate NodePorts, if needed.
	if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer {
		if err := initNodePorts(service, nodePortOp); err != nil {
			txn.Revert()
			return nil, err
		}
	}

	// Handle ExternalTraffic related fields during service creation.
	if apiservice.NeedsHealthCheck(service) {
		if err := al.allocHealthCheckNodePort(service, nodePortOp); err != nil {
			txn.Revert()
			return nil, errors.NewInternalError(err)
		}
	}

	return txn, nil
}

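// initNodePorts allocates a NodePort for each service port that needs one,
// honoring user-requested values and reusing the same NodePort for service
// ports that share a port number.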
func initNodePorts(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) error {
	svcPortToNodePort := map[int]int{}
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		if servicePort.NodePort == 0 && !shouldAllocateNodePorts(service) {
			// Don't allocate new ports, but do respect specific requests.
			continue
		}
		allocatedNodePort := svcPortToNodePort[int(servicePort.Port)]
		if allocatedNodePort == 0 {
			// This will only scan forward in the service.Spec.Ports list because any matches
			// before the current port would have been found in svcPortToNodePort. This is really
			// looking for any user provided values.
			np := findRequestedNodePort(int(servicePort.Port), service.Spec.Ports)
			if np != 0 {
				err := nodePortOp.Allocate(np)
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), np, err.Error())}
					return errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
				servicePort.NodePort = int32(np)
				svcPortToNodePort[int(servicePort.Port)] = np
			} else {
				nodePort, err := nodePortOp.AllocateNext()
				if err != nil {
					// TODO: what error should be returned here? It's not a
					// field-level validation failure (the field is valid), and it's
					// not really an internal error.
					return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
				}
				servicePort.NodePort = int32(nodePort)
				svcPortToNodePort[int(servicePort.Port)] = nodePort
			}
		} else if int(servicePort.NodePort) != allocatedNodePort {
			// TODO(xiangpengzhao): do we need to allocate a new NodePort in this case?
			// Note: the current implementation is better, because it saves a NodePort.
			if servicePort.NodePort == 0 {
				servicePort.NodePort = int32(allocatedNodePort)
			} else {
				err := nodePortOp.Allocate(int(servicePort.NodePort))
				if err != nil {
					// TODO: when validation becomes versioned, this gets more complicated.
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), servicePort.NodePort, err.Error())}
					return errors.NewInvalid(api.Kind("Service"), service.Name, el)
				}
			}
		}
	}
	return nil
}

// allocHealthCheckNodePort allocates a health check node port to the service.
func (al *Allocators) allocHealthCheckNodePort(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) error {
	healthCheckNodePort := service.Spec.HealthCheckNodePort
	if healthCheckNodePort != 0 {
		// If the request has a health check nodePort in mind, attempt to reserve it.
		err := nodePortOp.Allocate(int(healthCheckNodePort))
		if err != nil {
			return fmt.Errorf("failed to allocate requested HealthCheck NodePort %v: %v",
				healthCheckNodePort, err)
		}
	} else {
		// If the request has no health check nodePort specified, allocate any.
		healthCheckNodePort, err := nodePortOp.AllocateNext()
		if err != nil {
			return fmt.Errorf("failed to allocate a HealthCheck NodePort %v: %v", healthCheckNodePort, err)
		}
		service.Spec.HealthCheckNodePort = int32(healthCheckNodePort)
	}
	return nil
}

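// allocateUpdate reconciles ClusterIP and NodePort allocations for a Service
// that is being updated, returning a transaction that commits or reverts the
// changes as a unit.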
func (al *Allocators) allocateUpdate(after After, before Before, dryRun bool) (transaction, error) {
	result := metaTransaction{}
	success := false
	defer func() {
		if !success {
			result.Revert()
		}
	}()

	// Ensure IP family fields are correctly initialized. We do it here, since
	// we want this to be visible even when dryRun == true.
	if err := al.initIPFamilyFields(after, before); err != nil {
		return nil, err
	}

	// Allocate ClusterIPs
	//TODO(thockin): validation should not pass with empty clusterIP, but it
	//does (and is tested!). Fixing that all is a big PR and will have to
	//happen later.
	if txn, err := al.txnUpdateClusterIPs(after, before, dryRun); err != nil {
		return nil, err
	} else {
		result = append(result, txn)
	}

	// Allocate ports
	if txn, err := al.txnUpdateNodePorts(after, before, dryRun); err != nil {
		return nil, err
	} else {
		result = append(result, txn)
	}

	success = true
	return result, nil
}

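// txnUpdateClusterIPs reconciles ClusterIP allocations for a service update:
// commit releases the IPs the update gave up, while revert releases the IPs
// the update newly allocated.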
func (al *Allocators) txnUpdateClusterIPs(after After, before Before, dryRun bool) (transaction, error) {
	service := after.Service

	allocated, released, err := al.updateClusterIPs(after, before, dryRun)
	if err != nil {
		return nil, err
	}

	// on failure: any newly allocated IP must be released back
	// on failure: any previously allocated IP that would have been released
	//             must *not* be released
	// on success: any previously allocated IP that should be released will
	//             be released
	txn := callbackTransaction{
		commit: func() {
			if dryRun {
				return
			}
			if len(allocated) > 0 {
				klog.V(0).InfoS("allocated clusterIPs",
					"service", klog.KObj(service),
					"clusterIPs", allocated)
			}
			if actuallyReleased, err := al.releaseIPs(released); err != nil {
				klog.ErrorS(err, "failed to clean up after successful service update",
					"service", klog.KObj(service),
					"shouldRelease", released,
					"released", actuallyReleased)
			}
		},
		revert: func() {
			if dryRun {
				return
			}
			if actuallyReleased, err := al.releaseIPs(allocated); err != nil {
				klog.ErrorS(err, "failed to clean up after failed service update",
					"service", klog.KObj(service),
					"shouldRelease", allocated,
					"released", actuallyReleased)
			}
		},
	}
	return txn, nil
}

// updateClusterIPs handles type changes and dual-stack upgrades/downgrades
// for a service update. It does not perform the actual release of
// clusterIPs; it returns a map[family]ip for the caller to release once
// everything else has executed successfully.
func (al *Allocators) updateClusterIPs(after After, before Before, dryRun bool) (allocated map[api.IPFamily]string, toRelease map[api.IPFamily]string, err error) {
	oldService, service := before.Service, after.Service

	// We don't want to auto-upgrade (add an IP) or downgrade (remove an IP)
	// PreferDualStack services following a cluster change to/from
	// dual-stackness.
	//
	// That means a PreferDualStack service will only be upgraded/downgraded
	// when:
	// - changing ipFamilyPolicy to "RequireDualStack" or "SingleStack" AND
	// - adding or removing a secondary clusterIP or ipFamily
	if isMatchingPreferDualStackClusterIPFields(after, before) {
		return allocated, toRelease, nil // nothing more to do.
	}

	// use cases:
	// A: service changing types from ExternalName TO ClusterIP types ==> allocate all new
	// B: service changing types from ClusterIP types TO ExternalName ==> release all allocated
	// C: service upgrading to dual stack ==> partial allocation
	// D: service downgrading from dual stack ==> partial release

	// CASE A:
	// Update service from ExternalName to non-ExternalName, should initialize ClusterIP.
	if oldService.Spec.Type == api.ServiceTypeExternalName && service.Spec.Type != api.ServiceTypeExternalName {
		allocated, err := al.allocClusterIPs(service, dryRun)
		return allocated, nil, err
	}

	// If this is a headless service then bail out early (no clusterIPs
	// management needed).
	if len(oldService.Spec.ClusterIPs) > 0 && oldService.Spec.ClusterIPs[0] == api.ClusterIPNone {
		return nil, nil, nil
	}

	// CASE B:
	// Update service from non-ExternalName to ExternalName, should release ClusterIP if exists.
	if oldService.Spec.Type != api.ServiceTypeExternalName && service.Spec.Type == api.ServiceTypeExternalName {
		toRelease = make(map[api.IPFamily]string)
		for i, family := range oldService.Spec.IPFamilies {
			toRelease[family] = oldService.Spec.ClusterIPs[i]
		}
		return nil, toRelease, nil
	}

	upgraded := len(oldService.Spec.IPFamilies) == 1 && len(service.Spec.IPFamilies) == 2
	downgraded := len(oldService.Spec.IPFamilies) == 2 && len(service.Spec.IPFamilies) == 1

	// CASE C:
	if upgraded {
		toAllocate := make(map[api.IPFamily]string)
		// If the secondary IP was named, just get it; if not, add a marker.
		if len(service.Spec.ClusterIPs) < 2 {
			service.Spec.ClusterIPs = append(service.Spec.ClusterIPs, "" /* marker */)
		}

		toAllocate[service.Spec.IPFamilies[1]] = service.Spec.ClusterIPs[1]

		// allocate
		allocated, err := al.allocIPs(service, toAllocate, dryRun)

		// set if successful
		if err == nil {
			service.Spec.ClusterIPs[1] = allocated[service.Spec.IPFamilies[1]]
		}

		return allocated, nil, err
	}

	// CASE D:
	if downgraded {
		toRelease = make(map[api.IPFamily]string)
		toRelease[oldService.Spec.IPFamilies[1]] = oldService.Spec.ClusterIPs[1]
		// Note: we don't release the clusterIP here; that is left to the
		// caller once the rest of the update succeeds.
		return nil, toRelease, err
	}

	// it was not an upgrade nor downgrade
	return nil, nil, nil
}

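// txnUpdateNodePorts reconciles NodePort (and health check node port)
// allocations for a service update, wrapping the port operation in a
// transaction.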
func (al *Allocators) txnUpdateNodePorts(after After, before Before, dryRun bool) (transaction, error) {
	oldService, service := before.Service, after.Service

	// The allocator tracks dry-run-ness internally.
	nodePortOp := portallocator.StartOperation(al.serviceNodePorts, dryRun)

	txn := callbackTransaction{
		commit: func() {
			nodePortOp.Commit()
			// We don't NEED to call Finish() here, but that package says to,
			// so for future-safety, we will.
			nodePortOp.Finish()
		},
		revert: func() {
			// Weirdly named, but this will revert if commit wasn't called.
			nodePortOp.Finish()
		},
	}

	// Update service from NodePort or LoadBalancer to ExternalName or ClusterIP, should release NodePort if exists.
	if (oldService.Spec.Type == api.ServiceTypeNodePort || oldService.Spec.Type == api.ServiceTypeLoadBalancer) &&
		(service.Spec.Type == api.ServiceTypeExternalName || service.Spec.Type == api.ServiceTypeClusterIP) {
		al.releaseNodePorts(oldService, nodePortOp)
	}

	// Update service from any type to NodePort or LoadBalancer, should update NodePort.
	if service.Spec.Type == api.ServiceTypeNodePort || service.Spec.Type == api.ServiceTypeLoadBalancer {
		if err := al.updateNodePorts(After{service}, Before{oldService}, nodePortOp); err != nil {
			txn.Revert()
			return nil, err
		}
	}

	// Handle ExternalTraffic related updates.
	success, err := al.updateHealthCheckNodePort(After{service}, Before{oldService}, nodePortOp)
	if !success || err != nil {
		txn.Revert()
		return nil, err
	}

	return txn, nil
}

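// releaseNodePorts marks all of the service's NodePorts for deferred release.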
func (al *Allocators) releaseNodePorts(service *api.Service, nodePortOp *portallocator.PortAllocationOperation) {
	nodePorts := collectServiceNodePorts(service)

	for _, nodePort := range nodePorts {
		nodePortOp.ReleaseDeferred(nodePort)
	}
}

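// updateNodePorts allocates NodePorts that are new in the updated service
// and defers release of NodePorts that the update no longer uses.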
func (al *Allocators) updateNodePorts(after After, before Before, nodePortOp *portallocator.PortAllocationOperation) error {
	oldService, newService := before.Service, after.Service

	oldNodePortsNumbers := collectServiceNodePorts(oldService)
	newNodePorts := []ServiceNodePort{}
	portAllocated := map[int]bool{}

	for i := range newService.Spec.Ports {
		servicePort := &newService.Spec.Ports[i]
		if servicePort.NodePort == 0 && !shouldAllocateNodePorts(newService) {
			// Don't allocate new ports, but do respect specific requests.
			continue
		}
		nodePort := ServiceNodePort{Protocol: servicePort.Protocol, NodePort: servicePort.NodePort}
		if nodePort.NodePort != 0 {
			if !containsNumber(oldNodePortsNumbers, int(nodePort.NodePort)) && !portAllocated[int(nodePort.NodePort)] {
				err := nodePortOp.Allocate(int(nodePort.NodePort))
				if err != nil {
					el := field.ErrorList{field.Invalid(field.NewPath("spec", "ports").Index(i).Child("nodePort"), nodePort.NodePort, err.Error())}
					return errors.NewInvalid(api.Kind("Service"), newService.Name, el)
				}
				portAllocated[int(nodePort.NodePort)] = true
			}
		} else {
			nodePortNumber, err := nodePortOp.AllocateNext()
			if err != nil {
				// TODO: what error should be returned here? It's not a
				// field-level validation failure (the field is valid), and it's
				// not really an internal error.
				return errors.NewInternalError(fmt.Errorf("failed to allocate a nodePort: %v", err))
			}
			servicePort.NodePort = int32(nodePortNumber)
			nodePort.NodePort = servicePort.NodePort
		}
		if containsNodePort(newNodePorts, nodePort) {
			return fmt.Errorf("duplicate nodePort: %v", nodePort)
		}
		newNodePorts = append(newNodePorts, nodePort)
	}

	newNodePortsNumbers := collectServiceNodePorts(newService)

	// The comparison loops are O(N^2), but we don't expect N to be huge
	// (there's a hard-limit at 2^16, because they're ports; and even 4 ports would be a lot).
	for _, oldNodePortNumber := range oldNodePortsNumbers {
		if containsNumber(newNodePortsNumbers, oldNodePortNumber) {
			continue
		}
		nodePortOp.ReleaseDeferred(int(oldNodePortNumber))
	}

	return nil
}

// updateHealthCheckNodePort handles HealthCheckNodePort allocation/release
// and adjusts HealthCheckNodePort during service update if needed.
func (al *Allocators) updateHealthCheckNodePort(after After, before Before, nodePortOp *portallocator.PortAllocationOperation) (bool, error) {
	oldService, service := before.Service, after.Service

	neededHealthCheckNodePort := apiservice.NeedsHealthCheck(oldService)
	oldHealthCheckNodePort := oldService.Spec.HealthCheckNodePort

	needsHealthCheckNodePort := apiservice.NeedsHealthCheck(service)

	switch {
	// Case 1: Transition from don't need HealthCheckNodePort to needs HealthCheckNodePort.
	// Allocate a health check node port or attempt to reserve the user-specified one if provided.
	// Insert health check node port into the service's HealthCheckNodePort field if needed.
	case !neededHealthCheckNodePort && needsHealthCheckNodePort:
		if err := al.allocHealthCheckNodePort(service, nodePortOp); err != nil {
			return false, errors.NewInternalError(err)
		}

	// Case 2: Transition from needs HealthCheckNodePort to don't need HealthCheckNodePort.
	// Free the existing healthCheckNodePort and clear the HealthCheckNodePort field.
	case neededHealthCheckNodePort && !needsHealthCheckNodePort:
		nodePortOp.ReleaseDeferred(int(oldHealthCheckNodePort))
	}
	return true, nil
}

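// releaseAllocatedResources releases the ClusterIPs, NodePorts, and health
// check node port held by a service, e.g. when it is deleted.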
func (al *Allocators) releaseAllocatedResources(svc *api.Service) {
	al.releaseClusterIPs(svc)

	for _, nodePort := range collectServiceNodePorts(svc) {
		err := al.serviceNodePorts.Release(nodePort)
		if err != nil {
			// these should be caught by an eventual reconciliation / restart
			utilruntime.HandleError(fmt.Errorf("Error releasing service %s node port %d: %v", svc.Name, nodePort, err))
		}
	}

	if apiservice.NeedsHealthCheck(svc) {
		nodePort := svc.Spec.HealthCheckNodePort
		if nodePort > 0 {
			err := al.serviceNodePorts.Release(int(nodePort))
			if err != nil {
				// these should be caught by an eventual reconciliation / restart
				utilruntime.HandleError(fmt.Errorf("Error releasing service %s health check node port %d: %v", svc.Name, nodePort, err))
			}
		}
	}
}

// releaseClusterIPs releases the allocated ClusterIPs for a service that is
// about to be deleted.
func (al *Allocators) releaseClusterIPs(service *api.Service) (released map[api.IPFamily]string, err error) {
	// ExternalName services don't get ClusterIPs.
	if service.Spec.Type == api.ServiceTypeExternalName {
		return nil, nil
	}

	// Headless services don't get ClusterIPs.
	if len(service.Spec.ClusterIPs) > 0 && service.Spec.ClusterIPs[0] == api.ClusterIPNone {
		return nil, nil
	}

	toRelease := make(map[api.IPFamily]string)
	for _, ip := range service.Spec.ClusterIPs {
		if netutils.IsIPv6String(ip) {
			toRelease[api.IPv6Protocol] = ip
		} else {
			toRelease[api.IPv4Protocol] = ip
		}
	}
	return al.releaseIPs(toRelease)
}

// This is O(N), but we expect haystack to be small;
// so small that we expect a linear search to be faster.
func containsNumber(haystack []int, needle int) bool {
	for _, v := range haystack {
		if v == needle {
			return true
		}
	}
	return false
}

// This is O(N), but we expect serviceNodePorts to be small;
// so small that we expect a linear search to be faster.
func containsNodePort(serviceNodePorts []ServiceNodePort, serviceNodePort ServiceNodePort) bool {
	for _, snp := range serviceNodePorts {
		if snp == serviceNodePort {
			return true
		}
	}
	return false
}

// findRequestedNodePort loops through the service ports list, finds one with
// the same port number and a NodePort specified, and returns that NodePort;
// otherwise it returns 0.
func findRequestedNodePort(port int, servicePorts []api.ServicePort) int {
	for i := range servicePorts {
		servicePort := servicePorts[i]
		if port == int(servicePort.Port) && servicePort.NodePort != 0 {
			return int(servicePort.NodePort)
		}
	}
	return 0
}

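// shouldAllocateNodePorts reports whether NodePorts should be allocated for
// this service: always for NodePort services, and per
// spec.allocateLoadBalancerNodePorts for LoadBalancer services.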
func shouldAllocateNodePorts(service *api.Service) bool {
	if service.Spec.Type == api.ServiceTypeNodePort {
		return true
	}
	if service.Spec.Type == api.ServiceTypeLoadBalancer {
		return *service.Spec.AllocateLoadBalancerNodePorts
	}
	return false
}

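// collectServiceNodePorts returns the non-zero NodePort numbers of all ports
// in the service.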
func collectServiceNodePorts(service *api.Service) []int {
	servicePorts := []int{}
	for i := range service.Spec.Ports {
		servicePort := &service.Spec.Ports[i]
		if servicePort.NodePort != 0 {
			servicePorts = append(servicePorts, int(servicePort.NodePort))
		}
	}
	return servicePorts
}

// isMatchingPreferDualStackClusterIPFields tests if two PreferDualStack
// services have matching ClusterIP-related fields.
// Assumption: the old service is a valid, defaulted service (e.g., loaded
// from the store).
func isMatchingPreferDualStackClusterIPFields(after After, before Before) bool {
	oldService, service := before.Service, after.Service

	if oldService == nil {
		return false
	}

	if service.Spec.IPFamilyPolicy == nil {
		return false
	}

	// If the type mutated then it is an update that needs to run through
	// the entire process.
	if oldService.Spec.Type != service.Spec.Type {
		return false
	}
	// Both must be a type that gets an IP assigned.
	if service.Spec.Type != api.ServiceTypeClusterIP &&
		service.Spec.Type != api.ServiceTypeNodePort &&
		service.Spec.Type != api.ServiceTypeLoadBalancer {
		return false
	}

	// Both must be of IPFamilyPolicy==PreferDualStack.
	if service.Spec.IPFamilyPolicy != nil && *(service.Spec.IPFamilyPolicy) != api.IPFamilyPolicyPreferDualStack {
		return false
	}

	if oldService.Spec.IPFamilyPolicy != nil && *(oldService.Spec.IPFamilyPolicy) != api.IPFamilyPolicyPreferDualStack {
		return false
	}

	if !sameClusterIPs(oldService, service) {
		return false
	}

	if !sameIPFamilies(oldService, service) {
		return false
	}

	// They match on:
	//   Policy: PreferDualStack
	//   ClusterIPs
	//   IPFamilies
	return true
}