Commit 00c972c

add source in azure metrics

revert back zz_generated.conversion.go
andyzhangx committed May 10, 2019
1 parent ef9e794 commit 00c972c
Showing 9 changed files with 63 additions and 74 deletions.
16 changes: 2 additions & 14 deletions staging/src/k8s.io/legacy-cloud-providers/azure/azure_backoff.go
@@ -550,25 +550,13 @@ func (az *Cloud) deleteRouteWithRetry(routeName string) error {
 	})
 }
 
-// CreateOrUpdateVMWithRetry invokes az.VirtualMachinesClient.CreateOrUpdate with exponential backoff retry
-func (az *Cloud) CreateOrUpdateVMWithRetry(resourceGroup, vmName string, newVM compute.VirtualMachine) error {
-	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
-		ctx, cancel := getContextWithCancel()
-		defer cancel()
-
-		resp, err := az.VirtualMachinesClient.CreateOrUpdate(ctx, resourceGroup, vmName, newVM)
-		klog.V(10).Infof("VirtualMachinesClient.CreateOrUpdate(%s): end", vmName)
-		return az.processHTTPRetryResponse(nil, "", resp, err)
-	})
-}
-
 // UpdateVmssVMWithRetry invokes az.VirtualMachineScaleSetVMsClient.Update with exponential backoff retry
-func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) error {
+func (az *Cloud) UpdateVmssVMWithRetry(resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) error {
 	return wait.ExponentialBackoff(az.requestBackoff(), func() (bool, error) {
 		ctx, cancel := getContextWithCancel()
 		defer cancel()
 
-		resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters)
+		resp, err := az.VirtualMachineScaleSetVMsClient.Update(ctx, resourceGroupName, VMScaleSetName, instanceID, parameters, source)
 		klog.V(10).Infof("VirtualMachineScaleSetVMsClient.Update(%s,%s): end", VMScaleSetName, instanceID)
 		return az.processHTTPRetryResponse(nil, "", resp, err)
 	})
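Editor's note: the wrapper above follows a standard pattern in this tree — run the API call inside wait.ExponentialBackoff and let the condition function decide whether the response is final. Below is a minimal, self-contained sketch of that pattern with the new source argument threaded through. The vmssVMClient interface, backoff values, and success check are illustrative stand-ins, not the real cloud-provider types (the real code delegates the retry decision to az.processHTTPRetryResponse).

// retrysketch illustrates the backoff-retry pattern used by
// UpdateVmssVMWithRetry. All names here are illustrative stand-ins.
package retrysketch

import (
	"context"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// vmssVMClient is a simplified stand-in for the real VirtualMachineScaleSetVMsClient.
type vmssVMClient interface {
	Update(ctx context.Context, rg, scaleSet, instanceID string, vm interface{}, source string) (*http.Response, error)
}

// updateVmssVMWithRetry retries the Update call with exponential backoff,
// passing the same source tag on every attempt so all retries are attributed
// to one logical operation in the metrics.
func updateVmssVMWithRetry(c vmssVMClient, rg, scaleSet, instanceID string, vm interface{}, source string) error {
	backoff := wait.Backoff{Duration: time.Second, Factor: 2.0, Steps: 5} // illustrative values
	return wait.ExponentialBackoff(backoff, func() (bool, error) {
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		resp, err := c.Update(ctx, rg, scaleSet, instanceID, vm, source)
		// Simplified success check; the real code delegates this decision
		// to az.processHTTPRetryResponse.
		done := err == nil && resp != nil && resp.StatusCode < http.StatusInternalServerError
		return done, nil
	})
}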
90 changes: 45 additions & 45 deletions staging/src/k8s.io/legacy-cloud-providers/azure/azure_client.go

Large diffs are not rendered by default.
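Editor's note: azure_client.go's diff is not rendered, but given the signature changes in the rest of the commit, its wrapped client methods presumably gain a source parameter and forward it into newMetricContext. A hypothetical sketch of that shape, with simplified stand-in types:

// clientsketch shows the presumed shape of the unrendered azure_client.go
// change; every name here is a simplified stand-in, not the real client.
package clientsketch

import (
	"context"
	"net/http"
	"time"
)

type metricContext struct {
	start      time.Time
	attributes []string
}

func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext {
	return &metricContext{
		start:      time.Now(),
		attributes: []string{prefix + "_" + request, resourceGroup, subscriptionID, source},
	}
}

type vmClient struct {
	subscriptionID string
	do             func(ctx context.Context) (*http.Response, error)
}

// CreateOrUpdate mirrors the presumed new signature: the extra source string
// rides along into the metric context before the request is issued.
func (c *vmClient) CreateOrUpdate(ctx context.Context, resourceGroupName, vmName string, vm interface{}, source string) (*http.Response, error) {
	mc := newMetricContext("vm", "create_or_update", resourceGroupName, c.subscriptionID, source)
	_ = mc // the real client observes latency and errors via mc when the call returns
	return c.do(ctx)
}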

@@ -84,7 +84,7 @@ func (as *availabilitySet) AttachDisk(isManagedDisk bool, diskName, diskURI stri
 	// Invalidate the cache right after updating
 	defer as.cloud.vmCache.Delete(vmName)
 
-	_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
+	_, err = as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "attach_disk")
 	if err != nil {
 		klog.Errorf("azureDisk - attach disk(%s, %s) failed, err: %v", diskName, diskURI, err)
 		detail := err.Error()
@@ -151,7 +151,7 @@ func (as *availabilitySet) DetachDisk(diskName, diskURI string, nodeName types.N
 	// Invalidate the cache right after updating
 	defer as.cloud.vmCache.Delete(vmName)
 
-	return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM)
+	return as.VirtualMachinesClient.CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM, "detach_disk")
 }
 
 // GetDataDisks gets a list of data disks attached to the node.
@@ -89,7 +89,7 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod
 	defer ss.vmssVMCache.Delete(key)
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
-	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
+	_, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk")
 	if err != nil {
 		detail := err.Error()
 		if strings.Contains(detail, errLeaseFailed) || strings.Contains(detail, errDiskBlobNotFound) {
@@ -159,7 +159,7 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName
 	defer ss.vmssVMCache.Delete(key)
 
 	klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI)
-	return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
+	return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk")
 }
 
 // GetDataDisks gets a list of data disks attached to the node.
@@ -290,7 +290,7 @@ func newFakeAzureVirtualMachinesClient() *fakeAzureVirtualMachinesClient {
 	return fVMC
 }
 
-func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine) (resp *http.Response, err error) {
+func (fVMC *fakeAzureVirtualMachinesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, VMName string, parameters compute.VirtualMachine, source string) (resp *http.Response, err error) {
 	fVMC.mutex.Lock()
 	defer fVMC.mutex.Unlock()
 
@@ -550,7 +550,7 @@ func (fVMC *fakeVirtualMachineScaleSetVMsClient) GetInstanceView(ctx context.Con
 	return result, nil
 }
 
-func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM) (resp *http.Response, err error) {
+func (fVMC *fakeVirtualMachineScaleSetVMsClient) Update(ctx context.Context, resourceGroupName string, VMScaleSetName string, instanceID string, parameters compute.VirtualMachineScaleSetVM, source string) (resp *http.Response, err error) {
 	fVMC.mutex.Lock()
 	defer fVMC.mutex.Unlock()
 
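Editor's note: the fakes above record calls under a mutex so tests can assert on them. A trimmed sketch of that shape, extended to capture the new source argument — the type and field names here are hypothetical, not the real fakes:

// fakesketch shows a hypothetical test double in the spirit of
// fakeVirtualMachineScaleSetVMsClient.
package fakesketch

import (
	"context"
	"net/http"
	"sync"
)

// recordingVMSSVMClient records the source tag of every Update call so a
// test can assert which operation (attach_disk, detach_disk, ...) drove it.
type recordingVMSSVMClient struct {
	mutex   sync.Mutex
	sources []string
}

func (c *recordingVMSSVMClient) Update(ctx context.Context, resourceGroupName, VMScaleSetName, instanceID string, parameters interface{}, source string) (*http.Response, error) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.sources = append(c.sources, source)
	return &http.Response{StatusCode: http.StatusOK}, nil
}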
@@ -33,6 +33,7 @@ var (
 		"request", // API function that is being invoked
 		"resource_group", // Resource group of the resource being monitored
 		"subscription_id", // Subscription ID of the resource being monitored
+		"source", // Operation source (optional)
 	}
 
 	apiMetrics = registerAPIMetrics(metricLabels...)
@@ -43,10 +44,10 @@ type metricContext struct {
 	attributes []string
 }
 
-func newMetricContext(prefix, request, resourceGroup, subscriptionID string) *metricContext {
+func newMetricContext(prefix, request, resourceGroup, subscriptionID, source string) *metricContext {
 	return &metricContext{
 		start:      time.Now(),
-		attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID},
+		attributes: []string{prefix + "_" + request, strings.ToLower(resourceGroup), subscriptionID, source},
 	}
}
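Editor's note: registerAPIMetrics is not part of this diff, but the way the attributes slice lines up with metricLabels is the crux of the change — every label name needs exactly one value, in order. A sketch of that plumbing, assuming Prometheus client_golang; the metric name and the observe helper are assumptions, not code from this commit:

// metricsketch sketches how the expanded label set could feed a Prometheus
// histogram; apiLatency stands in for whatever registerAPIMetrics builds.
package metricsketch

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var metricLabels = []string{"request", "resource_group", "subscription_id", "source"}

var apiLatency = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "cloudprovider_azure_api_request_duration_seconds", // assumed name
		Help: "Latency of an Azure API call",
	},
	metricLabels,
)

func init() {
	prometheus.MustRegister(apiLatency)
}

type metricContext struct {
	start      time.Time
	attributes []string // one value per entry in metricLabels, in order
}

// observe records elapsed time under the four labels; a value/label count
// mismatch would make WithLabelValues panic, which is exactly what
// TestAzureMetricLabelCardinality guards against.
func (mc *metricContext) observe() {
	apiLatency.WithLabelValues(mc.attributes...).Observe(time.Since(mc.start).Seconds())
}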
@@ -23,12 +23,12 @@ import (
 )
 
 func TestAzureMetricLabelCardinality(t *testing.T) {
-	mc := newMetricContext("test", "create", "resource_group", "subscription_id")
+	mc := newMetricContext("test", "create", "resource_group", "subscription_id", "source")
 	assert.Len(t, mc.attributes, len(metricLabels), "cardinalities of labels and values must match")
 }
 
 func TestAzureMetricLabelPrefix(t *testing.T) {
-	mc := newMetricContext("prefix", "request", "resource_group", "subscription_id")
+	mc := newMetricContext("prefix", "request", "resource_group", "subscription_id", "source")
 	found := false
 	for _, attribute := range mc.attributes {
 		if attribute == "prefix_request" {
@@ -1108,7 +1108,7 @@ func getClusterResources(az *Cloud, vmCount int, availabilitySetCount int) (clus
 
 		vmCtx, vmCancel := getContextWithCancel()
 		defer vmCancel()
-		_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM)
+		_, err := az.VirtualMachinesClient.CreateOrUpdate(vmCtx, az.Config.ResourceGroup, vmName, newVM, "")
 		if err != nil {
 		}
 		// add to kubernetes
8 changes: 4 additions & 4 deletions staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go
@@ -712,10 +712,10 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 	klog.V(2).Infof("EnsureHostInPool begins to update vmssVM(%s) with new backendPoolID %s", vmName, backendPoolID)
-	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
+	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update")
 	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
 		klog.V(2).Infof("EnsureHostInPool update backing off vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
-		retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
+		retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update")
 		if retryErr != nil {
 			err = retryErr
 			klog.Errorf("EnsureHostInPool update abort backoff vmssVM(%s) with new backendPoolID %s, err: %v", vmName, backendPoolID, err)
@@ -841,10 +841,10 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa
 	ctx, cancel := getContextWithCancel()
 	defer cancel()
 	klog.V(2).Infof("ensureBackendPoolDeletedFromNode begins to update vmssVM(%s) with backendPoolID %s", nodeName, backendPoolID)
-	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM)
+	resp, err := ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "network_update")
 	if ss.CloudProviderBackoff && shouldRetryHTTPRequest(resp, err) {
 		klog.V(2).Infof("ensureBackendPoolDeletedFromNode update backing off vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)
-		retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM)
+		retryErr := ss.UpdateVmssVMWithRetry(nodeResourceGroup, ssName, instanceID, newVM, "network_update")
 		if retryErr != nil {
 			err = retryErr
 			klog.Errorf("ensureBackendPoolDeletedFromNode update abort backoff vmssVM(%s) with backendPoolID %s, err: %v", nodeName, backendPoolID, err)
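Editor's note: both hunks above share one shape — a direct Update first, then the backoff wrapper only when backoff is enabled and the response looks retriable, with the same "network_update" source on both paths so metrics attribute every attempt to the same operation. Reusing the illustrative vmssVMClient and updateVmssVMWithRetry stand-ins from the sketch under azure_backoff.go, that flow looks roughly like:

// updateWithFallback sketches the EnsureHostInPool flow; shouldRetry is a
// simplified stand-in for the real shouldRetryHTTPRequest helper.
func updateWithFallback(c vmssVMClient, rg, scaleSet, instanceID string, vm interface{}, source string, backoffEnabled bool) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	resp, err := c.Update(ctx, rg, scaleSet, instanceID, vm, source)
	if backoffEnabled && shouldRetry(resp, err) {
		// The retry path reuses the same source tag as the first attempt.
		return updateVmssVMWithRetry(c, rg, scaleSet, instanceID, vm, source)
	}
	return err
}

func shouldRetry(resp *http.Response, err error) bool {
	return err != nil || (resp != nil && resp.StatusCode >= http.StatusInternalServerError)
}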
