diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
index 834157cf1043..687353e90367 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
@@ -935,6 +935,7 @@ func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, strin
 }
 
 func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string) error {
+	klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID)
 	vmssNamesMap := make(map[string]bool)
 
 	// the standard load balancer supports multiple vmss in its backend while the basic sku doesn't
@@ -944,7 +945,7 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
 			continue
 		}
 		// in this scenario the vmSetName is an empty string and the name of vmss should be obtained from the provider IDs of nodes
-		vmssName, resourceGroupName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
+		resourceGroupName, vmssName, err := getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID)
 		if err != nil {
 			klog.V(4).Infof("ensureVMSSInPool: found VMAS node %s, will skip checking and continue", node.Name)
 			continue
@@ -958,6 +959,7 @@ func (ss *scaleSet) ensureVMSSInPool(service *v1.Service, nodes []*v1.Node, back
 		vmssNamesMap[vmSetName] = true
 	}
 
+	klog.V(2).Infof("ensureVMSSInPool begins to update VMSS %v with backendPoolID %s", vmssNamesMap, backendPoolID)
 	for vmssName := range vmssNamesMap {
 		vmss, err := ss.getVMSS(vmssName, cacheReadTypeDefault)
 		if err != nil {
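
Note (not part of the patch): the one-line call-site change above matters because the Azure provider ID embeds the resource group name before the scale set name, so the helper's first return value is the resource group, not the VMSS name; the old call site had the two assignments swapped. Below is a minimal, self-contained sketch of that parsing for illustration only, assuming the standard Azure VMSS provider-ID layout; the regexp and error text here are hypothetical stand-ins, not the upstream implementation.

package main

import (
	"fmt"
	"regexp"
)

// Illustrative pattern: in a VMSS instance provider ID, the resource group
// segment comes before the scale set segment, so the first capture group is
// the resource group and the second is the VMSS name.
var providerIDRE = regexp.MustCompile(
	`azure:///subscriptions/.+/resourceGroups/(.+)/providers/Microsoft.Compute/virtualMachineScaleSets/(.+)/virtualMachines/\d+`)

// getVmssAndResourceGroupNameByVMProviderID returns (resourceGroupName,
// vmssName) in that order, matching the corrected call site in the patch.
func getVmssAndResourceGroupNameByVMProviderID(providerID string) (string, string, error) {
	matches := providerIDRE.FindStringSubmatch(providerID)
	if len(matches) != 3 {
		// Hypothetical error; the real code returns a sentinel for non-VMSS nodes.
		return "", "", fmt.Errorf("not a VMSS instance: %q", providerID)
	}
	return matches[1], matches[2], nil
}

func main() {
	id := "azure:///subscriptions/sub/resourceGroups/my-rg/providers/Microsoft.Compute/virtualMachineScaleSets/my-vmss/virtualMachines/0"
	rg, vmss, err := getVmssAndResourceGroupNameByVMProviderID(id)
	fmt.Println(rg, vmss, err) // my-rg my-vmss <nil>
}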