From c0071c617325642bd24d9b0587273bf7a7c7db42 Mon Sep 17 00:00:00 2001
From: andyzhangx
Date: Mon, 13 Jul 2020 07:35:32 +0000
Subject: [PATCH] chore: add diskclient.ListByResourceGroup interface

update bazel config
---
 .../legacy-cloud-providers/azure/azure.go     |   6 +-
 .../azure/azure_backoff.go                    |   4 +-
 .../azure/azure_controller_common.go          |   8 +-
 .../azure/azure_controller_common_test.go     |   2 +-
 .../azure/azure_controller_standard_test.go   |   6 +-
 .../azure/azure_fakes.go                      |   2 +-
 .../azure/azure_instances.go                  |  24 +--
 .../azure/azure_instances_test.go             | 181 +-----------------
 .../azure/azure_loadbalancer.go               |   8 +-
 .../azure/azure_routes_test.go                |   4 +-
 .../azure/azure_standard.go                   |   6 +-
 .../azure/azure_standard_test.go              |  12 +-
 .../azure/azure_test.go                       |   2 +-
 .../azure/azure_zones.go                      |   6 +-
 .../azure/clients/diskclient/BUILD            |   1 +
 .../clients/diskclient/azure_diskclient.go    | 125 ++++++++++++
 .../azure/clients/diskclient/interface.go     |   3 +
 .../diskclient/mockdiskclient/interface.go    |  15 ++
 18 files changed, 195 insertions(+), 220 deletions(-)

diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go
index fcccf5786804..b2e7a50d134c 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure.go
@@ -248,7 +248,7 @@ type Cloud struct {
 	ResourceRequestBackoff wait.Backoff
 	metadata               *InstanceMetadataService
-	vmSet                  VMSet
+	VMSet                  VMSet
 
 	// ipv6DualStack allows overriding for unit testing. It's normally initialized from featuregates
 	ipv6DualStackEnabled bool
@@ -491,12 +491,12 @@ func (az *Cloud) InitializeCloudFromConfig(config *Config, fromSecret bool) erro
 	}
 
 	if strings.EqualFold(vmTypeVMSS, az.Config.VMType) {
-		az.vmSet, err = newScaleSet(az)
+		az.VMSet, err = newScaleSet(az)
 		if err != nil {
 			return err
 		}
 	} else {
-		az.vmSet = newAvailabilitySet(az)
+		az.VMSet = newAvailabilitySet(az)
 	}
 
 	az.vmCache, err = az.newVMCache()
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
index c0091e3ce388..8bdd4349d45d 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
@@ -111,7 +111,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(nodeName types.NodeName) ([]st
 	var privateIPs []string
 	err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) {
 		var retryErr error
-		privateIPs, retryErr = az.vmSet.GetPrivateIPsByNodeName(string(nodeName))
+		privateIPs, retryErr = az.VMSet.GetPrivateIPsByNodeName(string(nodeName))
 		if retryErr != nil {
 			// won't retry since the instance doesn't exist on Azure.
if retryErr == cloudprovider.InstanceNotFound { @@ -135,7 +135,7 @@ func (az *Cloud) GetIPForMachineWithRetry(name types.NodeName) (string, string, var ip, publicIP string err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { var retryErr error - ip, publicIP, retryErr = az.vmSet.GetIPByNodeName(string(name)) + ip, publicIP, retryErr = az.VMSet.GetIPByNodeName(string(name)) if retryErr != nil { klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) return false, nil diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index 5e396c80dfa5..1d2cc30e7d83 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -90,15 +90,15 @@ type controllerCommon struct { // getNodeVMSet gets the VMSet interface based on config.VMType and the real virtual machine type. func (c *controllerCommon) getNodeVMSet(nodeName types.NodeName, crt azcache.AzureCacheReadType) (VMSet, error) { - // 1. vmType is standard, return cloud.vmSet directly. + // 1. vmType is standard, return cloud.VMSet directly. if c.cloud.VMType == vmTypeStandard { - return c.cloud.vmSet, nil + return c.cloud.VMSet, nil } // 2. vmType is Virtual Machine Scale Set (vmss), convert vmSet to scaleSet. - ss, ok := c.cloud.vmSet.(*scaleSet) + ss, ok := c.cloud.VMSet.(*scaleSet) if !ok { - return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.vmSet, c.cloud.VMType) + return nil, fmt.Errorf("error of converting vmSet (%q) to scaleSet with vmType %q", c.cloud.VMSet, c.cloud.VMType) } // 3. If the node is managed by availability set, then return ss.availabilitySet. 
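Context for the vmSet -> VMSet rename running through this patch: exporting the field lets code outside the azure package read or inject a VMSet implementation, while in-package callers only see the spelling change. A minimal sketch of what the export enables, assuming gomock and the package's existing mockvmsets helpers (the same mock used by azure_routes_test.go later in this patch):

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	// Inject a mock VMSet; setting the field from outside the azure
	// package is possible only now that Cloud.VMSet is exported.
	// mockvmsets.NewMockVMSet is assumed from the existing mock package.
	mockVMSet := mockvmsets.NewMockVMSet(ctrl)
	mockVMSet.EXPECT().GetPrimaryVMSetName().Return("agentpool1").AnyTimes()

	az := &azure.Cloud{}
	az.VMSet = mockVMSet
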
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go index 44783eb84209..e6d30cb01c1f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go @@ -222,7 +222,7 @@ func TestCommonAttachDiskWithVMSS(t *testing.T) { } ss, err := newScaleSet(testCloud) assert.Nil(t, err) - testCloud.vmSet = ss + testCloud.VMSet = ss } common := &controllerCommon{ diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go index 0303da77ba8e..8e5cef6254b6 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_standard_test.go @@ -79,7 +79,7 @@ func TestStandardAttachDisk(t *testing.T) { for i, test := range testCases { testCloud := GetTestCloud(ctrl) - vmSet := testCloud.vmSet + vmSet := testCloud.VMSet expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false) mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) for _, vm := range expectedVMs { @@ -148,7 +148,7 @@ func TestStandardDetachDisk(t *testing.T) { for i, test := range testCases { testCloud := GetTestCloud(ctrl) - vmSet := testCloud.vmSet + vmSet := testCloud.VMSet expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false) mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) for _, vm := range expectedVMs { @@ -224,7 +224,7 @@ func TestGetDataDisks(t *testing.T) { } for i, test := range testCases { testCloud := GetTestCloud(ctrl) - vmSet := testCloud.vmSet + vmSet := testCloud.VMSet expectedVMs := setTestVirtualMachines(testCloud, map[string]string{"vm1": "PowerState/Running"}, false) mockVMsClient := testCloud.VirtualMachinesClient.(*mockvmclient.MockInterface) for _, vm := range expectedVMs { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go index 59e260cdec71..2e4f549ff48f 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_fakes.go @@ -83,7 +83,7 @@ func GetTestCloud(ctrl *gomock.Controller) (az *Cloud) { az.VirtualMachineScaleSetsClient = mockvmssclient.NewMockInterface(ctrl) az.VirtualMachineScaleSetVMsClient = mockvmssvmclient.NewMockInterface(ctrl) az.VirtualMachinesClient = mockvmclient.NewMockInterface(ctrl) - az.vmSet = newAvailabilitySet(az) + az.VMSet = newAvailabilitySet(az) az.vmCache, _ = az.newVMCache() az.lbCache, _ = az.newLBCache() az.nsgCache, _ = az.newNSGCache() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go index a0f19f85fbe4..95f8f49ae733 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances.go @@ -95,7 +95,7 @@ func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.N // Not local instance, get addresses from Azure ARM API. 
if !isLocalInstance { - if az.vmSet != nil { + if az.VMSet != nil { return az.addressGetter(name) } @@ -168,7 +168,7 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin return nil, nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return nil, err } @@ -189,7 +189,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri return true, nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { if err == cloudprovider.InstanceNotFound { return false, nil @@ -214,7 +214,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, nil } - nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) + nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { // Returns false, so the controller manager will continue to check InstanceExistsByProviderID(). if err == cloudprovider.InstanceNotFound { @@ -224,7 +224,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - powerStatus, err := az.vmSet.GetPowerStatusByNodeName(string(nodeName)) + powerStatus, err := az.VMSet.GetPowerStatusByNodeName(string(nodeName)) if err != nil { // Returns false, so the controller manager will continue to check InstanceExistsByProviderID(). if err == cloudprovider.InstanceNotFound { @@ -292,8 +292,8 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e // Not local instance, get instanceID from Azure ARM API. if !isLocalInstance { - if az.vmSet != nil { - return az.vmSet.GetInstanceIDByNodeName(nodeName) + if az.VMSet != nil { + return az.VMSet.GetInstanceIDByNodeName(nodeName) } // vmSet == nil indicates credentials are not provided. @@ -302,7 +302,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e return az.getLocalInstanceProviderID(metadata, nodeName) } - return az.vmSet.GetInstanceIDByNodeName(nodeName) + return az.VMSet.GetInstanceIDByNodeName(nodeName) } func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, nodeName string) (string, error) { @@ -342,7 +342,7 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string return "", nil } - name, err := az.vmSet.GetNodeNameByProviderID(providerID) + name, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return "", err } @@ -380,8 +380,8 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, return "", err } if !isLocalInstance { - if az.vmSet != nil { - return az.vmSet.GetInstanceTypeByNodeName(string(name)) + if az.VMSet != nil { + return az.VMSet.GetInstanceTypeByNodeName(string(name)) } // vmSet == nil indicates credentials are not provided. 
@@ -393,7 +393,7 @@ func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, } } - return az.vmSet.GetInstanceTypeByNodeName(string(name)) + return az.VMSet.GetInstanceTypeByNodeName(string(name)) } // AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go index 6e4a3cb8321d..568534efcfba 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_instances_test.go @@ -174,7 +174,7 @@ func TestInstanceID(t *testing.T) { expectedErrMsg: fmt.Errorf("failure of getting instance metadata"), }, { - name: "NodeAddresses should report error if cloud.vmSet is nil", + name: "NodeAddresses should report error if cloud.VMSet is nil", nodeName: "vm1", vmType: vmTypeStandard, useInstanceMetadata: true, @@ -194,9 +194,9 @@ func TestInstanceID(t *testing.T) { for _, test := range testcases { if test.nilVMSet { - cloud.vmSet = nil + cloud.VMSet = nil } else { - cloud.vmSet = newAvailabilitySet(cloud) + cloud.VMSet = newAvailabilitySet(cloud) } cloud.Config.VMType = test.vmType cloud.Config.UseInstanceMetadata = test.useInstanceMetadata @@ -445,7 +445,7 @@ func TestNodeAddresses(t *testing.T) { expectedErrMsg: fmt.Errorf("getError"), }, { - name: "NodeAddresses should report error if cloud.vmSet is nil", + name: "NodeAddresses should report error if cloud.VMSet is nil", nodeName: "vm1", vmType: vmTypeStandard, useInstanceMetadata: true, @@ -518,9 +518,9 @@ func TestNodeAddresses(t *testing.T) { for _, test := range testcases { if test.nilVMSet { - cloud.vmSet = nil + cloud.VMSet = nil } else { - cloud.vmSet = newAvailabilitySet(cloud) + cloud.VMSet = newAvailabilitySet(cloud) } cloud.Config.VMType = test.vmType cloud.Config.UseInstanceMetadata = test.useInstanceMetadata @@ -571,175 +571,6 @@ func TestNodeAddresses(t *testing.T) { } } -func TestIsCurrentInstance(t *testing.T) { - cloud := &Cloud{ - Config: Config{ - VMType: vmTypeStandard, - }, - } - testcases := []struct { - nodeName string - metadataVMName string - expected bool - expectedErrMsg error - }{ - { - nodeName: "node1", - metadataVMName: "node1", - expected: true, - }, - { - nodeName: "node1", - metadataVMName: "node2", - expected: false, - }, - } - - for _, test := range testcases { - real, err := cloud.isCurrentInstance(types.NodeName(test.nodeName), test.metadataVMName) - assert.Equal(t, test.expectedErrMsg, err) - assert.Equal(t, test.expected, real) - } -} - -func TestInstanceTypeByProviderID(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - cloud := GetTestCloud(ctrl) - cloud.Config.UseInstanceMetadata = true - - testcases := []struct { - name string - vmList []string - vmSize string - nodeName string - vmType string - providerID string - metadataName string - metadataTemplate string - useCustomImsCache bool - expectedVMsize string - expectedErrMsg error - }{ - { - name: "InstanceTypeByProviderID should get InstanceType from Azure API if metadata.Compute.VMSize is nil", - vmList: []string{"vm1"}, - nodeName: "vm1", - metadataName: "vm1", - vmType: vmTypeStandard, - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", - expectedVMsize: "Standard_A0", - }, - { - name: "InstanceTypeByProviderID should get InstanceType from metedata if node's name are equal to 
metadataName and metadata.Compute.VMSize is not nil", - vmList: []string{"vm1"}, - vmSize: "Standard_A0", - nodeName: "vm1", - metadataName: "vm1", - vmType: vmTypeStandard, - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", - expectedVMsize: "Standard_A0", - }, - { - name: "InstanceTypeByProviderID should get InstanceType from Azure API if node is not local instance", - vmList: []string{"vm2"}, - nodeName: "vm2", - metadataName: "vm1", - vmType: vmTypeStandard, - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm2", - expectedVMsize: "Standard_A0", - }, - { - name: "InstanceTypeByProviderID should return nil if node is unmanaged", - providerID: "/subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm1", - }, - { - name: "InstanceTypeByProviderID should report error if node doesn't exist", - vmList: []string{"vm1"}, - nodeName: "vm3", - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm3", - expectedErrMsg: fmt.Errorf("instance not found"), - }, - { - name: "InstanceTypeByProviderID should report error if providerID is invalid", - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachine/vm3", - expectedErrMsg: fmt.Errorf("error splitting providerID"), - }, - { - name: "InstanceTypeByProviderID should report error if providerID is null", - expectedErrMsg: fmt.Errorf("providerID is empty, the node is not initialized yet"), - }, - { - name: "InstanceTypeByProviderID should report error if metadata.Compute is nil", - nodeName: "vm1", - metadataName: "vm1", - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", - metadataTemplate: `{"network":{"interface":[]}}`, - expectedErrMsg: fmt.Errorf("failure of getting instance metadata"), - }, - { - name: "NodeAddresses should report error when invoke GetMetadata", - nodeName: "vm1", - metadataName: "vm1", - providerID: "azure:///subscriptions/subscription/resourceGroups/rg/providers/Microsoft.Compute/virtualMachines/vm1", - vmType: vmTypeStandard, - useCustomImsCache: true, - expectedErrMsg: fmt.Errorf("getError"), - }, - } - - for _, test := range testcases { - cloud.Config.VMType = test.vmType - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Errorf("Test [%s] unexpected error: %v", test.name, err) - } - - mux := http.NewServeMux() - mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if test.metadataTemplate != "" { - fmt.Fprintf(w, test.metadataTemplate) - } else { - fmt.Fprintf(w, "{\"compute\":{\"name\":\"%s\",\"vmsize\":\"%s\",\"subscriptionId\":\"subscription\",\"resourceGroupName\":\"rg\"}}", test.metadataName, test.vmSize) - } - })) - go func() { - http.Serve(listener, mux) - }() - defer listener.Close() - - cloud.metadata, err = NewInstanceMetadataService("http://" + listener.Addr().String() + "/") - if err != nil { - t.Errorf("Test [%s] unexpected error: %v", test.name, err) - } - - if test.useCustomImsCache { - cloud.metadata.imsCache, err = azcache.NewTimedcache(metadataCacheTTL, func(key string) (interface{}, error) { - return nil, fmt.Errorf("getError") - }) - if err != nil { - t.Errorf("Test [%s] unexpected error: %v", test.name, err) - } - } - - vmListWithPowerState := make(map[string]string) - for _, vm := range test.vmList { - 
vmListWithPowerState[vm] = "" - } - expectedVMs := setTestVirtualMachines(cloud, vmListWithPowerState, false) - mockVMsClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface) - for _, vm := range expectedVMs { - mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, *vm.Name, gomock.Any()).Return(vm, nil).AnyTimes() - } - mockVMsClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, "vm3", gomock.Any()).Return(compute.VirtualMachine{}, &retry.Error{HTTPStatusCode: http.StatusNotFound, RawError: cloudprovider.InstanceNotFound}).AnyTimes() - mockVMsClient.EXPECT().Update(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - - instanceType, err := cloud.InstanceTypeByProviderID(context.Background(), test.providerID) - assert.Equal(t, test.expectedErrMsg, err, test.name) - assert.Equal(t, test.expectedVMsize, instanceType, test.name) - } -} - func TestInstanceExistsByProviderID(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index e12b9accb50a..9b150397d02a 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -258,7 +258,7 @@ func (az *Cloud) getLoadBalancerResourceGroup() string { func (az *Cloud) getServiceLoadBalancer(service *v1.Service, clusterName string, nodes []*v1.Node, wantLb bool) (lb *network.LoadBalancer, status *v1.LoadBalancerStatus, exists bool, err error) { isInternal := requiresInternalLoadBalancer(service) var defaultLB *network.LoadBalancer - primaryVMSetName := az.vmSet.GetPrimaryVMSetName() + primaryVMSetName := az.VMSet.GetPrimaryVMSetName() defaultLBName := az.getAzureLoadBalancerName(clusterName, primaryVMSetName, isInternal) existingLBs, err := az.ListLB(service) @@ -329,7 +329,7 @@ func (az *Cloud) selectLoadBalancer(clusterName string, service *v1.Service, exi isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) - vmSetNames, err := az.vmSet.GetVMSetNames(service, nodes) + vmSetNames, err := az.VMSet.GetVMSetNames(service, nodes) if err != nil { klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err @@ -935,7 +935,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. 
vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) klog.V(10).Infof("EnsureBackendPoolDeleted(%s,%s) for service %s: start", lbBackendPoolID, vmSetName, serviceName) - err := az.vmSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) + err := az.VMSet.EnsureBackendPoolDeleted(service, lbBackendPoolID, vmSetName, lb.BackendAddressPools) if err != nil { klog.Errorf("EnsureBackendPoolDeleted(%s) for service %s failed: %v", lbBackendPoolID, serviceName, err) return nil, err @@ -979,7 +979,7 @@ func (az *Cloud) reconcileLoadBalancer(clusterName string, service *v1.Service, vmSetName := az.mapLoadBalancerNameToVMSet(lbName, clusterName) // Etag would be changed when updating backend pools, so invalidate lbCache after it. defer az.lbCache.Delete(lbName) - err := az.vmSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) + err := az.VMSet.EnsureHostsInPool(service, nodes, lbBackendPoolID, vmSetName, isInternal) if err != nil { return nil, err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes_test.go index 06d64213b057..769d63a3c154 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_routes_test.go @@ -119,7 +119,7 @@ func TestCreateRoute(t *testing.T) { cloud := &Cloud{ RouteTablesClient: routeTableClient, - vmSet: mockVMSet, + VMSet: mockVMSet, Config: Config{ RouteTableResourceGroup: "foo", RouteTableName: "bar", @@ -526,7 +526,7 @@ func TestListRoutes(t *testing.T) { cloud := &Cloud{ RouteTablesClient: routeTableClient, - vmSet: mockVMSet, + VMSet: mockVMSet, Config: Config{ RouteTableResourceGroup: "foo", RouteTableName: "bar", diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go index e9a2f049c12e..174ba45e69d9 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard.go @@ -135,7 +135,7 @@ func (az *Cloud) getNetworkResourceSubscriptionID() string { func (az *Cloud) mapLoadBalancerNameToVMSet(lbName string, clusterName string) (vmSetName string) { vmSetName = strings.TrimSuffix(lbName, InternalLoadBalancerNameSuffix) if strings.EqualFold(clusterName, vmSetName) { - vmSetName = az.vmSet.GetPrimaryVMSetName() + vmSetName = az.VMSet.GetPrimaryVMSetName() } return vmSetName @@ -150,7 +150,7 @@ func (az *Cloud) getAzureLoadBalancerName(clusterName string, vmSetName string, clusterName = az.LoadBalancerName } lbNamePrefix := vmSetName - if strings.EqualFold(vmSetName, az.vmSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() { + if strings.EqualFold(vmSetName, az.VMSet.GetPrimaryVMSetName()) || az.useStandardLoadBalancer() { lbNamePrefix = clusterName } if isInternal { @@ -732,7 +732,7 @@ func (as *availabilitySet) EnsureHostInPool(service *v1.Service, nodeName types. 
return "", "", "", nil, nil } - klog.Errorf("error: az.EnsureHostInPool(%s), az.vmSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) + klog.Errorf("error: az.EnsureHostInPool(%s), az.VMSet.GetPrimaryInterface.Get(%s, %s), err=%v", nodeName, vmName, vmSetName, err) return "", "", "", nil, err } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go index 5b83b546207d..530d57f19de9 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_standard_test.go @@ -908,7 +908,7 @@ func TestGetStandardInstanceIDByNodeName(t *testing.T) { ID: to.StringPtr(invalidResouceID), }, nil).AnyTimes() - instanceID, err := cloud.vmSet.GetInstanceIDByNodeName(test.nodeName) + instanceID, err := cloud.VMSet.GetInstanceIDByNodeName(test.nodeName) assert.Equal(t, test.expectedErrMsg, err, test.name) assert.Equal(t, test.expectedID, instanceID, test.name) } @@ -979,7 +979,7 @@ func TestGetStandardVMPowerStatusByNodeName(t *testing.T) { mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface) mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes() - powerState, err := cloud.vmSet.GetPowerStatusByNodeName(test.nodeName) + powerState, err := cloud.VMSet.GetPowerStatusByNodeName(test.nodeName) assert.Equal(t, test.expectedErrMsg, err, test.name) assert.Equal(t, test.expectedStatus, powerState, test.name) } @@ -1064,7 +1064,7 @@ func TestGetStandardVMZoneByNodeName(t *testing.T) { mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface) mockVMClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nodeName, gomock.Any()).Return(test.vm, test.getErr).AnyTimes() - zone, err := cloud.vmSet.GetZoneByNodeName(test.nodeName) + zone, err := cloud.VMSet.GetZoneByNodeName(test.nodeName) assert.Equal(t, test.expectedErrMsg, err, test.name) assert.Equal(t, test.expectedZone, zone, test.name) } @@ -1166,7 +1166,7 @@ func TestGetStandardVMSetNames(t *testing.T) { mockVMClient := cloud.VirtualMachinesClient.(*mockvmclient.MockInterface) mockVMClient.EXPECT().List(gomock.Any(), cloud.ResourceGroup).Return(test.vm, nil).AnyTimes() - vmSetNames, err := cloud.vmSet.GetVMSetNames(test.service, test.nodes) + vmSetNames, err := cloud.VMSet.GetVMSetNames(test.service, test.nodes) assert.Equal(t, test.expectedErrMsg, err, test.name) assert.Equal(t, test.expectedVMSetNames, vmSetNames, test.name) } @@ -1339,7 +1339,7 @@ func TestStandardEnsureHostInPool(t *testing.T) { mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes() mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - _, _, _, vm, err := cloud.vmSet.EnsureHostInPool(test.service, test.nodeName, test.backendPoolID, test.vmSetName, false) + _, _, _, vm, err := cloud.VMSet.EnsureHostInPool(test.service, test.nodeName, test.backendPoolID, test.vmSetName, false) assert.Equal(t, test.expectedErrMsg, err, test.name) assert.Nil(t, vm, test.name) } @@ -1501,7 +1501,7 @@ func TestStandardEnsureHostsInPool(t *testing.T) { mockInterfaceClient.EXPECT().Get(gomock.Any(), cloud.ResourceGroup, test.nicName, gomock.Any()).Return(testNIC, nil).AnyTimes() mockInterfaceClient.EXPECT().CreateOrUpdate(gomock.Any(), cloud.ResourceGroup, 
gomock.Any(), gomock.Any()).Return(nil).AnyTimes() - err := cloud.vmSet.EnsureHostsInPool(test.service, test.nodes, test.backendPoolID, test.vmSetName, false) + err := cloud.VMSet.EnsureHostsInPool(test.service, test.nodes, test.backendPoolID, test.vmSetName, false) if test.expectedErr { assert.Equal(t, test.expectedErrMsg, err.Error(), test.name) } else { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go index f263f2f61ee9..31aecc8c2beb 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_test.go @@ -2180,7 +2180,7 @@ func TestGetNodeNameByProviderID(t *testing.T) { } for _, test := range providers { - name, err := az.vmSet.GetNodeNameByProviderID(test.providerID) + name, err := az.VMSet.GetNodeNameByProviderID(test.providerID) if (err != nil) != test.fail { t.Errorf("Expected to fail=%t, with pattern %v", test.fail, test) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go index 36c78002dee1..f3e546296256 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_zones.go @@ -87,7 +87,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { if err != nil { return cloudprovider.Zone{}, fmt.Errorf("failure getting hostname from kernel") } - return az.vmSet.GetZoneByNodeName(strings.ToLower(hostname)) + return az.VMSet.GetZoneByNodeName(strings.ToLower(hostname)) } // GetZoneByProviderID implements Zones.GetZoneByProviderID @@ -104,7 +104,7 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl return cloudprovider.Zone{}, nil } - nodeName, err := az.vmSet.GetNodeNameByProviderID(providerID) + nodeName, err := az.VMSet.GetNodeNameByProviderID(providerID) if err != nil { return cloudprovider.Zone{}, err } @@ -126,5 +126,5 @@ func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) return cloudprovider.Zone{}, nil } - return az.vmSet.GetZoneByNodeName(string(nodeName)) + return az.VMSet.GetZoneByNodeName(string(nodeName)) } diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD index f162bab2db35..283a95d042a9 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/BUILD @@ -19,6 +19,7 @@ go_library( "//vendor/github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest:go_default_library", "//vendor/github.com/Azure/go-autorest/autorest/azure:go_default_library", + "//vendor/github.com/Azure/go-autorest/autorest/to:go_default_library", "//vendor/k8s.io/klog/v2:go_default_library", ], ) diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go index 396510e6463f..fb0d5705182b 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/azure_diskclient.go @@ -20,12 +20,14 @@ package diskclient import ( "context" + "fmt" "net/http" "time" 
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/to" "k8s.io/client-go/util/flowcontrol" "k8s.io/klog/v2" @@ -246,3 +248,126 @@ func (c *Client) deleteDisk(ctx context.Context, resourceGroupName string, diskN return c.armClient.DeleteResource(ctx, resourceID, "") } + +// ListByResourceGroup lists all the disks under a resource group. +func (c *Client) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) { + resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/disks", + autorest.Encode("path", c.subscriptionID), + autorest.Encode("path", resourceGroupName)) + + result := make([]compute.Disk, 0) + page := &DiskListPage{} + page.fn = c.listNextResults + + resp, rerr := c.armClient.GetResource(ctx, resourceID, "") + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.request", resourceID, rerr.Error()) + return result, rerr + } + + var err error + page.dl, err = c.listResponder(resp) + if err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.respond", resourceID, err) + return result, retry.GetError(resp, err) + } + + for page.NotDone() { + result = append(result, *page.Response().Value...) + if err = page.NextWithContext(ctx); err != nil { + klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "disk.list.next", resourceID, err) + return result, retry.GetError(page.Response().Response.Response, err) + } + } + + return result, nil +} + +// listNextResults retrieves the next set of results, if any. +func (c *Client) listNextResults(ctx context.Context, lastResults compute.DiskList) (result compute.DiskList, err error) { + req, err := c.diskListPreparer(ctx, lastResults) + if err != nil { + return result, autorest.NewErrorWithError(err, "diskclient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + + resp, rerr := c.armClient.Send(ctx, req) + defer c.armClient.CloseResponse(ctx, resp) + if rerr != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(rerr.Error(), "diskclient", "listNextResults", resp, "Failure sending next results request") + } + + result, err = c.listResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "diskclient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// listResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (c *Client) listResponder(resp *http.Response) (result compute.DiskList, err error) { + err = autorest.Respond( + resp, + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +func (c *Client) diskListPreparer(ctx context.Context, lr compute.DiskList) (*http.Request, error) { + if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lr.NextLink))) +} + +// DiskListPage contains a page of Disk values. 
+type DiskListPage struct {
+	fn func(context.Context, compute.DiskList) (compute.DiskList, error)
+	dl compute.DiskList
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DiskListPage) NextWithContext(ctx context.Context) (err error) {
+	next, err := page.fn(ctx, page.dl)
+	if err != nil {
+		return err
+	}
+	page.dl = next
+	return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *DiskListPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page DiskListPage) NotDone() bool {
+	return !page.dl.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page DiskListPage) Response() compute.DiskList {
+	return page.dl
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page DiskListPage) Values() []compute.Disk {
+	if page.dl.IsEmpty() {
+		return nil
+	}
+	return *page.dl.Value
+}
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go
index 3f4be1e5939e..4dd741693c49 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/interface.go
@@ -42,4 +42,7 @@ type Interface interface {
 
 	// Delete deletes a Disk by name.
 	Delete(ctx context.Context, resourceGroupName string, diskName string) *retry.Error
+
+	// ListByResourceGroup lists all the disks under a resource group.
+	ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error)
 }
diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient/interface.go b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient/interface.go
index dfd26723592c..f6742e8bf7ed 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient/interface.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/azure/clients/diskclient/mockdiskclient/interface.go
@@ -92,3 +92,18 @@ func (mr *MockInterfaceMockRecorder) Delete(ctx, resourceGroupName, diskName int
 	mr.mock.ctrl.T.Helper()
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockInterface)(nil).Delete), ctx, resourceGroupName, diskName)
 }
+
+// ListByResourceGroup mocks base method
+func (m *MockInterface) ListByResourceGroup(ctx context.Context, resourceGroupName string) ([]compute.Disk, *retry.Error) {
+	m.ctrl.T.Helper()
+	ret := m.ctrl.Call(m, "ListByResourceGroup", ctx, resourceGroupName)
+	ret0, _ := ret[0].([]compute.Disk)
+	ret1, _ := ret[1].(*retry.Error)
+	return ret0, ret1
+}
+
+// ListByResourceGroup indicates an expected call of ListByResourceGroup
+func (mr *MockInterfaceMockRecorder) ListByResourceGroup(ctx, resourceGroupName interface{}) *gomock.Call {
+	mr.mock.ctrl.T.Helper()
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListByResourceGroup", reflect.TypeOf((*MockInterface)(nil).ListByResourceGroup), ctx, resourceGroupName)
+}
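
To show how the new method is expected to be consumed, here is a hypothetical test snippet. It assumes GetTestCloud wires az.DisksClient to a mockdiskclient.MockInterface the same way it wires VirtualMachinesClient in azure_fakes.go above; the disk name is made up:

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	az := GetTestCloud(ctrl)

	// Stub a single page of results; the ARM paging in the real
	// ListByResourceGroup implementation is bypassed by the mock.
	expected := []compute.Disk{{Name: to.StringPtr("disk1")}}
	mockDisksClient := az.DisksClient.(*mockdiskclient.MockInterface)
	mockDisksClient.EXPECT().
		ListByResourceGroup(gomock.Any(), az.ResourceGroup).
		Return(expected, nil).
		AnyTimes()

	disks, rerr := az.DisksClient.ListByResourceGroup(context.Background(), az.ResourceGroup)
	assert.Nil(t, rerr)
	assert.Equal(t, expected, disks)

Note that RecordCallWithMethodType in the recorder must reference ListByResourceGroup itself: gomock validates recorded arguments against that method's reflected signature, so referencing Delete (three arguments) would make the two-argument EXPECT() call above fail to match.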