Merge pull request #1298 from k8s-infra-cherrypick-robot/cherry-pick-1289-to-release-1.1

[release-1.1] Fix bugs related to multiple slbs
k8s-ci-robot committed Mar 21, 2022
2 parents 379ea16 + 26b2519 commit daa95c0
Showing 2 changed files with 85 additions and 8 deletions.
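
The commit makes two related fixes for clusters that run multiple standard load balancers (SLBs): shouldChangeLoadBalancer now moves a service off a vmSet-dedicated load balancer once that vmSet becomes the primary one, and getServiceLoadBalancer/reconcileFrontendIPConfigs now thread the service's existing LoadBalancerStatus through so that an internal service keeps its private IP when it is moved. Below is a minimal sketch of the first decision, not the upstream code: names follow the diff, the surrounding checks of shouldChangeLoadBalancer are omitted, and the "-internal" suffix is assumed to match the repo's consts.InternalLoadBalancerNameSuffix.

package sketch

import "strings"

// internalLBNameSuffix stands in for the repo's consts.InternalLoadBalancerNameSuffix.
const internalLBNameSuffix = "-internal"

// shouldMoveOffDedicatedLB restates the new branch: a service sitting on a
// vmSet-dedicated LB is moved to another LB only when that vmSet has become
// the primary one and the dedicated LB is not already the cluster LB.
func shouldMoveOffDedicatedLB(currLBName, clusterName, vmSetName, primaryVMSetName string) bool {
	lbName := strings.TrimSuffix(currLBName, internalLBNameSuffix)
	if !strings.EqualFold(lbName, vmSetName) {
		// not a vmSet-dedicated LB; the real function applies further checks here
		return false
	}
	return lbName != clusterName && strings.EqualFold(primaryVMSetName, vmSetName)
}

With the setup used by the added TestShouldChangeLoadBalancer case (Standard SKU, EnableMultipleStandardLoadBalancers, PrimaryAvailabilitySetName "vmss-1", and the mode annotation selecting "vmss-1"), this returns true for currLBName "vmss-1", which is exactly what that test asserts.
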
25 changes: 19 additions & 6 deletions pkg/provider/azure_loadbalancer.go
@@ -324,9 +324,14 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust
return false
}

// if the current LB is what we want, keep it
lbName := strings.TrimSuffix(currLBName, consts.InternalLoadBalancerNameSuffix)
// change the LB from vmSet dedicated to primary if the vmSet becomes the primary one
if strings.EqualFold(lbName, vmSetName) {
if lbName != clusterName &&
strings.EqualFold(az.VMSet.GetPrimaryVMSetName(), vmSetName) {
klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName)
return true
}
return false
}
if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) && strings.EqualFold(clusterName, lbName) {
@@ -718,14 +723,16 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
if isInternalLoadBalancer(&existingLB) != isInternal {
continue
}
status, fipConfig, err := az.getServiceLoadBalancerStatus(service, &existingLB)
var fipConfig *network.FrontendIPConfiguration
status, fipConfig, err = az.getServiceLoadBalancerStatus(service, &existingLB)
if err != nil {
return nil, nil, false, err
}
if status == nil {
// service is not on this load balancer
continue
}
klog.V(4).Infof("getServiceLoadBalancer(%s, %s, %v): current lb ip: %s", service.Name, clusterName, wantLb, status.Ingress[0].IP)

// select another load balancer instead of returning
// the current one if the change is needed
@@ -751,7 +758,7 @@ func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string,
return nil, nil, false, err
}

return selectedLB, nil, exists, err
return selectedLB, status, exists, err
}

// create a default LB with meta data if not present
@@ -1515,11 +1522,12 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
return nil, err
}

lb, _, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs)
lb, lbStatus, _, err := az.getServiceLoadBalancer(service, clusterName, nodes, wantLb, existingLBs)
if err != nil {
klog.Errorf("reconcileLoadBalancer: failed to get load balancer for service %q, error: %v", serviceName, err)
return nil, err
}

lbName := *lb.Name
lbResourceGroup := az.getLoadBalancerResourceGroup()
lbBackendPoolID := az.getBackendPoolID(lbName, az.getLoadBalancerResourceGroup(), getBackendPoolName(clusterName, service))
@@ -1541,7 +1549,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service,
}

// reconcile the load balancer's frontend IP configurations.
ownedFIPConfig, changed, err := az.reconcileFrontendIPConfigs(clusterName, service, lb, wantLb, defaultLBFrontendIPConfigName)
ownedFIPConfig, changed, err := az.reconcileFrontendIPConfigs(clusterName, service, lb, lbStatus, wantLb, defaultLBFrontendIPConfigName)
if err != nil {
return lb, err
}
@@ -1729,7 +1737,7 @@ func (az *Cloud) reconcileLBRules(lb *network.LoadBalancer, service *v1.Service,
return dirtyRules
}

func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Service, lb *network.LoadBalancer, wantLb bool, defaultLBFrontendIPConfigName string) (*network.FrontendIPConfiguration, bool, error) {
func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Service, lb *network.LoadBalancer, status *v1.LoadBalancerStatus, wantLb bool, defaultLBFrontendIPConfigName string) (*network.FrontendIPConfiguration, bool, error) {
var err error
lbName := *lb.Name
serviceName := getServiceName(service)
Expand Down Expand Up @@ -1829,8 +1837,13 @@ func (az *Cloud) reconcileFrontendIPConfigs(clusterName string, service *v1.Serv
if loadBalancerIP != "" {
configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic
configProperties.PrivateIPAddress = &loadBalancerIP
} else if status != nil && len(status.Ingress) > 0 {
klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): keep the original private IP %s", serviceName, status.Ingress[0].IP)
configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodStatic
configProperties.PrivateIPAddress = to.StringPtr(status.Ingress[0].IP)
} else {
// We'll need to call GetLoadBalancer later to retrieve allocated IP.
klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): dynamically allocate the private IP", serviceName)
configProperties.PrivateIPAllocationMethod = network.IPAllocationMethodDynamic
}

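
The second fix: instead of always allocating the private IP dynamically when no loadBalancerIP is specified, reconcileFrontendIPConfigs now keeps the address already reported in the service's status, so moving an internal service between SLBs does not change its IP. A minimal sketch of that choice follows, assuming the LoadBalancerStatus comes from getServiceLoadBalancer as in the diff; the helper name and plain return types are illustrative only.

package sketch

import v1 "k8s.io/api/core/v1"

// pickPrivateIP mirrors the allocation choice added for internal services: an
// explicitly requested IP wins, otherwise the IP already reported in the
// service's LoadBalancerStatus is kept static, and only as a last resort is
// the address left for Azure to allocate dynamically.
func pickPrivateIP(loadBalancerIP string, status *v1.LoadBalancerStatus) (static bool, ip string) {
	switch {
	case loadBalancerIP != "":
		return true, loadBalancerIP // user-specified IP stays static
	case status != nil && len(status.Ingress) > 0:
		return true, status.Ingress[0].IP // reuse the previously allocated IP
	default:
		return false, "" // dynamic allocation; read back later via GetLoadBalancer
	}
}

The added TestReconcileZonesForFrontendIPConfigs case exercises exactly this path: with a status carrying 1.2.3.4, the frontend IP configuration ends up Static with that address.
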
68 changes: 66 additions & 2 deletions pkg/provider/azure_loadbalancer_test.go
@@ -1318,6 +1318,37 @@ func TestGetServiceLoadBalancer(t *testing.T) {
expectedExists: false,
expectedError: false,
},
{
desc: "getServiceLoadBalancer should create a new lb and return the status of the previous lb",
sku: "Basic",
wantLB: true,
service: getTestService("service1", v1.ProtocolTCP, map[string]string{consts.ServiceAnnotationLoadBalancerMode: "as", consts.ServiceAnnotationLoadBalancerInternal: consts.TrueAnnotationValue}, false, 80),
existingLBs: []network.LoadBalancer{
{
Name: to.StringPtr("as-internal"),
Location: to.StringPtr("westus"),
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
FrontendIPConfigurations: &[]network.FrontendIPConfiguration{
{
Name: to.StringPtr("aservice1"),
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
PrivateIPAddress: to.StringPtr("1.2.3.4"),
},
},
},
},
},
},
expectedLB: &network.LoadBalancer{
Name: to.StringPtr("testCluster-internal"),
Location: to.StringPtr("westus"),
Sku: &network.LoadBalancerSku{
Name: network.LoadBalancerSkuNameBasic,
},
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{},
},
expectedStatus: &v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: "1.2.3.4", Hostname: ""}}},
},
}

ctrl := gomock.NewController(t)
@@ -1330,6 +1361,7 @@ func TestGetServiceLoadBalancer(t *testing.T) {
mockLBsClient := mockloadbalancerclient.NewMockInterface(ctrl)
mockLBsClient.EXPECT().CreateOrUpdate(gomock.Any(), "rg", gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
mockLBsClient.EXPECT().List(gomock.Any(), "rg").Return(test.existingLBs, nil)
mockLBsClient.EXPECT().Delete(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
az.LoadBalancerClient = mockLBsClient

for _, existingLB := range test.existingLBs {
@@ -1338,7 +1370,9 @@ func TestGetServiceLoadBalancer(t *testing.T) {
t.Fatalf("TestCase[%d] meets unexpected error: %v", i, err)
}
}
test.service.Annotations = test.annotations
if test.annotations != nil {
test.service.Annotations = test.annotations
}
az.LoadBalancerSku = test.sku
lb, status, exists, err := az.getServiceLoadBalancer(&test.service, testClusterName,
clusterResources.nodes, test.wantLB, []network.LoadBalancer{})
@@ -4488,6 +4522,18 @@ func TestShouldChangeLoadBalancer(t *testing.T) {
res := cloud.shouldChangeLoadBalancer(&service, "testCluster-internal", "testCluster")
assert.False(t, res)
})

t.Run("shouldChangeLoadBalancer should return true if the mode is the same as the current LB but the vmSet is the primary one", func(t *testing.T) {
cloud.LoadBalancerSku = consts.LoadBalancerSkuStandard
cloud.EnableMultipleStandardLoadBalancers = true
cloud.PrimaryAvailabilitySetName = "vmss-1"
annotations := map[string]string{
consts.ServiceAnnotationLoadBalancerMode: "vmss-1",
}
service := getTestService("service1", v1.ProtocolTCP, annotations, false, 80)
res := cloud.shouldChangeLoadBalancer(&service, "vmss-1", "testCluster")
assert.True(t, res)
})
}

func TestRemoveFrontendIPConfigurationFromLoadBalancerDelete(t *testing.T) {
@@ -4589,11 +4635,13 @@ func TestReconcileZonesForFrontendIPConfigs(t *testing.T) {
service v1.Service
existingFrontendIPConfigs []network.FrontendIPConfiguration
existingPIP network.PublicIPAddress
status *v1.LoadBalancerStatus
getPIPError *retry.Error
getZoneError *retry.Error
regionZonesMap map[string][]string
expectedZones *[]string
expectedDirty bool
expectedIP string
expectedErr error
}{
{
@@ -4657,6 +4705,17 @@ func TestReconcileZonesForFrontendIPConfigs(t *testing.T) {
expectedZones: &[]string{"1"},
expectedDirty: true,
},
{
description: "reconcileFrontendIPConfigs should reuse the existing private IP for internal services",
service: getInternalTestService("test", 80),
status: &v1.LoadBalancerStatus{
Ingress: []v1.LoadBalancerIngress{
{IP: "1.2.3.4"},
},
},
expectedIP: "1.2.3.4",
expectedDirty: true,
},
} {
t.Run(tc.description, func(t *testing.T) {
cloud := GetTestCloud(ctrl)
@@ -4679,7 +4738,7 @@ func TestReconcileZonesForFrontendIPConfigs(t *testing.T) {
cloud.ZoneClient = zoneClient

defaultLBFrontendIPConfigName := cloud.getDefaultFrontendIPConfigName(&tc.service)
_, dirty, err := cloud.reconcileFrontendIPConfigs("testCluster", &tc.service, &lb, true, defaultLBFrontendIPConfigName)
_, dirty, err := cloud.reconcileFrontendIPConfigs("testCluster", &tc.service, &lb, tc.status, true, defaultLBFrontendIPConfigName)
if tc.expectedErr == nil {
assert.NoError(t, err)
} else {
Expand All @@ -4692,6 +4751,11 @@ func TestReconcileZonesForFrontendIPConfigs(t *testing.T) {
assert.Equal(t, tc.expectedZones, fip.Zones)
}
}

if tc.expectedIP != "" {
assert.Equal(t, network.IPAllocationMethodStatic, (*lb.FrontendIPConfigurations)[0].PrivateIPAllocationMethod)
assert.Equal(t, tc.expectedIP, to.String((*lb.FrontendIPConfigurations)[0].PrivateIPAddress))
}
})
}
}
