From 9b7a4868f29bfbfd22dd0435103303cc4045695e Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Thu, 18 Nov 2021 15:23:50 -0600 Subject: [PATCH 1/7] move and rename ipstate and change allocated to assigned Signed-off-by: Evan Baker typo --- cns/NetworkContainerContract.go | 18 ++-------- cns/api.go | 2 +- cns/client/client.go | 4 +-- cns/client/client_test.go | 22 ++++++------ cns/cmd/cli/cli.go | 29 ++++++++-------- cns/fakes/cnsfake.go | 12 +++---- cns/fakes/requestcontrollerfake.go | 3 +- cns/filter/ipam.go | 35 ++++++++++--------- cns/filter/ipam_test.go | 19 ++++++----- cns/restserver/internalapi_test.go | 20 +++++------ cns/restserver/ipam.go | 32 ++++++++--------- cns/restserver/ipam_test.go | 55 +++++++++++++++--------------- cns/restserver/util.go | 10 +++--- cns/types/ipconfig.go | 16 +++++++++ 14 files changed, 144 insertions(+), 133 deletions(-) create mode 100644 cns/types/ipconfig.go diff --git a/cns/NetworkContainerContract.go b/cns/NetworkContainerContract.go index d379ca5875..338b6c8d16 100644 --- a/cns/NetworkContainerContract.go +++ b/cns/NetworkContainerContract.go @@ -7,6 +7,8 @@ import ( "net" "strconv" "strings" + + "github.com/Azure/azure-container-networking/cns/types" ) // Container Network Service DNC Contract @@ -60,20 +62,6 @@ const ( Vxlan = "Vxlan" ) -// IPConfig States for CNS IPAM -type IPConfigState string - -const ( - // Available IPConfigState for available IPs. - Available IPConfigState = "Available" - // Allocated IPConfigState for allocated IPs. - Allocated IPConfigState = "Allocated" - // PendingRelease IPConfigState for pending release IPs. - PendingRelease IPConfigState = "PendingRelease" - // PendingProgramming IPConfigState for pending programming IPs. 
- PendingProgramming IPConfigState = "PendingProgramming" -) - // ChannelMode :- CNS channel modes const ( Direct = "Direct" @@ -373,7 +361,7 @@ type IPConfigResponse struct { // GetIPAddressesRequest is used in CNS IPAM mode to get the states of IPConfigs // The IPConfigStateFilter is a slice of IPs to fetch from CNS that match those states type GetIPAddressesRequest struct { - IPConfigStateFilter []IPConfigState + IPConfigStateFilter []types.IPState } // GetIPAddressStateResponse is used in CNS IPAM mode as a response to get IP address state diff --git a/cns/api.go b/cns/api.go index 816849fa12..18ede3d88f 100644 --- a/cns/api.go +++ b/cns/api.go @@ -54,7 +54,7 @@ type IPConfigurationStatus struct { NCID string ID string // uuid IPAddress string - State IPConfigState + State types.IPState PodInfo PodInfo } diff --git a/cns/client/client.go b/cns/client/client.go index 23a2e7c563..a6ce8e6efc 100644 --- a/cns/client/client.go +++ b/cns/client/client.go @@ -303,8 +303,8 @@ func (c *Client) ReleaseIPAddress(ctx context.Context, ipconfig cns.IPConfigRequ } // GetIPAddressesMatchingStates takes a variadic number of string parameters, to get all IP Addresses matching a number of states -// usage GetIPAddressesWithStates(cns.Available, cns.Allocated) +// usage GetIPAddressesMatchingStates(types.Available, types.Assigned) -func (c *Client) GetIPAddressesMatchingStates(ctx context.Context, stateFilter ...cns.IPConfigState) ([]cns.IPConfigurationStatus, error) { +func (c *Client) GetIPAddressesMatchingStates(ctx context.Context, stateFilter ...types.IPState) ([]cns.IPConfigurationStatus, error) { if len(stateFilter) == 0 { return nil, nil } diff --git a/cns/client/client_test.go b/cns/client/client_test.go index a04229ebe1..6479f0342d 100644 --- a/cns/client/client_test.go +++ b/cns/client/client_test.go @@ -284,12 +284,12 @@ func TestCNSClientRequestAndRelease(t *testing.T) { assert.Equal(t, desired, resultIPnet, "Desired result not matching actual result") // checking for
allocated IP address and pod context printing before ReleaseIPAddress is called - ipaddresses, err := cnsClient.GetIPAddressesMatchingStates(context.TODO(), cns.Allocated) + ipaddresses, err := cnsClient.GetIPAddressesMatchingStates(context.TODO(), types.Assigned) assert.NoError(t, err, "Get allocated IP addresses failed") assert.Len(t, ipaddresses, 1, "Number of available IP addresses expected to be 1") assert.Equal(t, desiredIpAddress, ipaddresses[0].IPAddress, "Available IP address does not match expected, address state") - assert.Equal(t, cns.Allocated, ipaddresses[0].State, "Available IP address does not match expected, address state") + assert.Equal(t, types.Assigned, ipaddresses[0].State, "Available IP address does not match expected, address state") t.Log(ipaddresses) @@ -356,7 +356,7 @@ func TestCNSClientDebugAPI(t *testing.T) { podConfig := inmemory.HTTPRestServiceData.PodIPConfigState for _, v := range podConfig { assert.Equal(t, "10.0.0.5", v.IPAddress, "Not the expected set values for testing IPConfigurationStatus, %+v", podConfig) - assert.Equal(t, cns.Allocated, v.State, "Not the expected set values for testing IPConfigurationStatus, %+v", podConfig) + assert.Equal(t, types.Assigned, v.State, "Not the expected set values for testing IPConfigurationStatus, %+v", podConfig) assert.Equal(t, "testNcId1", v.NCID, "Not the expected set values for testing IPConfigurationStatus, %+v", podConfig) } assert.GreaterOrEqual(t, len(inmemory.HTTPRestServiceData.PodIPConfigState), 1, "PodIpConfigState with at least 1 entry expected") @@ -1108,7 +1108,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { tests := []struct { name string ctx context.Context - stateFilter []cns.IPConfigState + stateFilter []types.IPState mockdo *mockdo routes map[string]url.URL want []cns.IPConfigurationStatus @@ -1117,7 +1117,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "happy case", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{cns.Available}, + 
stateFilter: []types.IPState{types.Available}, mockdo: &mockdo{ errToReturn: nil, objToReturn: &cns.GetIPAddressStatusResponse{ @@ -1132,7 +1132,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "length of zero", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{}, + stateFilter: []types.IPState{}, mockdo: &mockdo{}, routes: emptyRoutes, want: nil, @@ -1141,7 +1141,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "bad request", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{"garbage"}, + stateFilter: []types.IPState{"garbage"}, mockdo: &mockdo{ errToReturn: errBadRequest, objToReturn: nil, @@ -1154,7 +1154,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "bad decoding", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{cns.Available}, + stateFilter: []types.IPState{types.Available}, mockdo: &mockdo{ errToReturn: nil, objToReturn: []cns.GetIPAddressStatusResponse{}, @@ -1167,7 +1167,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "http status not ok", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{cns.Available}, + stateFilter: []types.IPState{types.Available}, mockdo: &mockdo{ errToReturn: nil, objToReturn: nil, @@ -1180,7 +1180,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "cns return code not zero", ctx: context.TODO(), - stateFilter: []cns.IPConfigState{cns.Available}, + stateFilter: []types.IPState{types.Available}, mockdo: &mockdo{ errToReturn: nil, objToReturn: &cns.GetIPAddressStatusResponse{ @@ -1197,7 +1197,7 @@ func TestGetIPAddressesMatchingStates(t *testing.T) { { name: "nil context", ctx: nil, - stateFilter: []cns.IPConfigState{cns.Available}, + stateFilter: []types.IPState{types.Available}, mockdo: &mockdo{}, routes: emptyRoutes, wantErr: true, diff --git a/cns/cmd/cli/cli.go b/cns/cmd/cli/cli.go index 832a0d515c..ce60ba16ec 100644 --- a/cns/cmd/cli/cli.go +++ b/cns/cmd/cli/cli.go @@ -9,6 +9,7 @@ import ( 
"github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/client" + "github.com/Azure/azure-container-networking/cns/types" ) const ( @@ -41,26 +42,26 @@ func HandleCNSClientCommands(ctx context.Context, cmd string, arg string) error } func getCmd(ctx context.Context, client *client.Client, arg string) error { - var states []cns.IPConfigState + var states []types.IPState - switch cns.IPConfigState(arg) { - case cns.Available: - states = append(states, cns.Available) + switch types.IPState(arg) { + case types.Available: + states = append(states, types.Available) - case cns.Allocated: - states = append(states, cns.Allocated) + case types.Assigned: + states = append(states, types.Assigned) - case cns.PendingRelease: - states = append(states, cns.PendingRelease) + case types.PendingRelease: + states = append(states, types.PendingRelease) - case cns.PendingProgramming: - states = append(states, cns.PendingProgramming) + case types.PendingProgramming: + states = append(states, types.PendingProgramming) default: - states = append(states, cns.Allocated) - states = append(states, cns.Available) - states = append(states, cns.PendingRelease) - states = append(states, cns.PendingProgramming) + states = append(states, types.Assigned) + states = append(states, types.Available) + states = append(states, types.PendingRelease) + states = append(states, types.PendingProgramming) } addr, err := client.GetIPAddressesMatchingStates(ctx, states...) 
diff --git a/cns/fakes/cnsfake.go b/cns/fakes/cnsfake.go index 7fb9f9dfc9..8570ea2059 100644 --- a/cns/fakes/cnsfake.go +++ b/cns/fakes/cnsfake.go @@ -69,14 +69,14 @@ func (ipm *IPStateManager) AddIPConfigs(ipconfigs []cns.IPConfigurationStatus) { defer ipm.Unlock() for _, ipconfig := range ipconfigs { switch ipconfig.State { - case cns.PendingProgramming: + case types.PendingProgramming: ipm.PendingProgramIPConfigState[ipconfig.ID] = ipconfig - case cns.Available: + case types.Available: ipm.AvailableIPConfigState[ipconfig.ID] = ipconfig ipm.AvailableIPIDStack.Push(ipconfig.ID) - case cns.Allocated: + case types.Assigned: ipm.AllocatedIPConfigState[ipconfig.ID] = ipconfig - case cns.PendingRelease: + case types.PendingRelease: ipm.PendingReleaseIPConfigState[ipconfig.ID] = ipconfig } } @@ -123,7 +123,7 @@ func (ipm *IPStateManager) MarkIPAsPendingRelease(numberOfIPsToMark int) (map[st // if there was an error, and not all ip's have been freed, restore state if err != nil && len(pendingReleaseIPs) != numberOfIPsToMark { for uuid, ipState := range pendingReleaseIPs { - ipState.State = cns.Available + ipState.State = types.Available ipm.AvailableIPIDStack.Push(pendingReleaseIPs[uuid].ID) ipm.AvailableIPConfigState[pendingReleaseIPs[uuid].ID] = ipState delete(ipm.PendingReleaseIPConfigState, pendingReleaseIPs[uuid].ID) @@ -139,7 +139,7 @@ func (ipm *IPStateManager) MarkIPAsPendingRelease(numberOfIPsToMark int) (map[st // add all pending release to a slice ipConfig := ipm.AvailableIPConfigState[id] - ipConfig.State = cns.PendingRelease + ipConfig.State = types.PendingRelease pendingReleaseIPs[id] = ipConfig delete(ipm.AvailableIPConfigState, id) diff --git a/cns/fakes/requestcontrollerfake.go b/cns/fakes/requestcontrollerfake.go index 2447d4548c..7301941d6d 100644 --- a/cns/fakes/requestcontrollerfake.go +++ b/cns/fakes/requestcontrollerfake.go @@ -8,6 +8,7 @@ import ( "net" "github.com/Azure/azure-container-networking/cns" + 
"github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" "github.com/google/uuid" ) @@ -55,7 +56,7 @@ func (rc *RequestControllerFake) CarveIPConfigsAndAddToStatusAndCNS(numberOfIPCo ipconfigCNS := cns.IPConfigurationStatus{ ID: ipconfigCRD.Name, IPAddress: ipconfigCRD.IP, - State: cns.Available, + State: types.Available, } cnsIPConfigs = append(cnsIPConfigs, ipconfigCNS) diff --git a/cns/filter/ipam.go b/cns/filter/ipam.go index 4d61e5ec87..ecee37f72e 100644 --- a/cns/filter/ipam.go +++ b/cns/filter/ipam.go @@ -1,30 +1,33 @@ package filter -import "github.com/Azure/azure-container-networking/cns" +import ( + "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/types" +) type IPConfigStatePredicate func(ipconfig cns.IPConfigurationStatus) bool var ( - // StateAllocated is a preset filter for cns.Allocated. - StateAllocated = ipConfigStatePredicate(cns.Allocated) - // StateAvailable is a preset filter for cns.Available. - StateAvailable = ipConfigStatePredicate(cns.Available) - // StatePendingProgramming is a preset filter for cns.PendingProgramming. - StatePendingProgramming = ipConfigStatePredicate(cns.PendingProgramming) - // StatePendingRelease is a preset filter for cns.PendingRelease. + // StateAssigned is a preset filter for types.Assigned. + StateAssigned = ipConfigStatePredicate(types.Assigned) + // StateAvailable is a preset filter for types.Available. + StateAvailable = ipConfigStatePredicate(types.Available) + // StatePendingProgramming is a preset filter for types.PendingProgramming. + StatePendingRelease = ipConfigStatePredicate(types.PendingRelease) - // StatePendingRelease is a preset filter for types.PendingRelease. 
+ StatePendingRelease = ipConfigStatePredicate(types.PendingRelease) ) -var filters = map[cns.IPConfigState]IPConfigStatePredicate{ - cns.Allocated: StateAllocated, - cns.Available: StateAvailable, - cns.PendingProgramming: StatePendingProgramming, - cns.PendingRelease: StatePendingRelease, +var filters = map[types.IPState]IPConfigStatePredicate{ + types.Assigned: StateAssigned, + types.Available: StateAvailable, + types.PendingProgramming: StatePendingProgramming, + types.PendingRelease: StatePendingRelease, } // ipConfigStatePredicate returns a predicate function that compares an IPConfigurationStatus.State to // the passed State string and returns true when equal. -func ipConfigStatePredicate(test cns.IPConfigState) IPConfigStatePredicate { +func ipConfigStatePredicate(test types.IPState) IPConfigStatePredicate { return func(ipconfig cns.IPConfigurationStatus) bool { return ipconfig.State == test } @@ -58,7 +61,7 @@ func MatchAnyIPConfigState(in map[string]cns.IPConfigurationStatus, predicates . // PredicatesForStates returns a slice of IPConfigStatePredicates matches // that map to the input IPConfigStates. 
-func PredicatesForStates(states ...cns.IPConfigState) []IPConfigStatePredicate { +func PredicatesForStates(states ...types.IPState) []IPConfigStatePredicate { var predicates []IPConfigStatePredicate for _, state := range states { if f, ok := filters[state]; ok { diff --git a/cns/filter/ipam_test.go b/cns/filter/ipam_test.go index c7cea1b943..b553a7ae32 100644 --- a/cns/filter/ipam_test.go +++ b/cns/filter/ipam_test.go @@ -5,39 +5,40 @@ import ( "testing" "github.com/Azure/azure-container-networking/cns" + "github.com/Azure/azure-container-networking/cns/types" "github.com/stretchr/testify/assert" ) var testStatuses = []struct { - State cns.IPConfigState + State types.IPState Status cns.IPConfigurationStatus }{ { - State: cns.Allocated, + State: types.Assigned, Status: cns.IPConfigurationStatus{ ID: "allocated", - State: cns.Allocated, + State: types.Assigned, }, }, { - State: cns.Available, + State: types.Available, Status: cns.IPConfigurationStatus{ ID: "available", - State: cns.Available, + State: types.Available, }, }, { - State: cns.PendingProgramming, + State: types.PendingProgramming, Status: cns.IPConfigurationStatus{ ID: "pending-programming", - State: cns.PendingProgramming, + State: types.PendingProgramming, }, }, { - State: cns.PendingRelease, + State: types.PendingRelease, Status: cns.IPConfigurationStatus{ ID: "pending-release", - State: cns.PendingRelease, + State: types.PendingRelease, }, }, } diff --git a/cns/restserver/internalapi_test.go b/cns/restserver/internalapi_test.go index f00f8b55a7..5e39cd5852 100644 --- a/cns/restserver/internalapi_test.go +++ b/cns/restserver/internalapi_test.go @@ -154,8 +154,8 @@ func TestPendingIPsGotUpdatedWhenSyncHostNCVersion(t *testing.T) { } for i := range receivedSecondaryIPConfigs { podIPConfigState := svc.PodIPConfigState[i] - if podIPConfigState.State != cns.PendingProgramming { - t.Errorf("Unexpected State %s, expeted State is %s, received %s, IP address is %s", podIPConfigState.State, 
cns.PendingProgramming, podIPConfigState.State, podIPConfigState.IPAddress) + if podIPConfigState.State != types.PendingProgramming { + t.Errorf("Unexpected State %s, expeted State is %s, received %s, IP address is %s", podIPConfigState.State, types.PendingProgramming, podIPConfigState.State, podIPConfigState.IPAddress) } } svc.SyncHostNCVersion(context.Background(), cns.CRD) @@ -167,8 +167,8 @@ func TestPendingIPsGotUpdatedWhenSyncHostNCVersion(t *testing.T) { } for i := range receivedSecondaryIPConfigs { podIPConfigState := svc.PodIPConfigState[i] - if podIPConfigState.State != cns.Available { - t.Errorf("Unexpected State %s, expeted State is %s, received %s, IP address is %s", podIPConfigState.State, cns.Available, podIPConfigState.State, podIPConfigState.IPAddress) + if podIPConfigState.State != types.Available { + t.Errorf("Unexpected State %s, expeted State is %s, received %s, IP address is %s", podIPConfigState.State, types.Available, podIPConfigState.State, podIPConfigState.IPAddress) } } } @@ -462,12 +462,12 @@ func validateNetworkRequest(t *testing.T, req cns.CreateNetworkContainerRequest) t.Fatalf("Failed as Secondary IP count doesnt match in PodIpConfig state, expected:%d, actual %d", len(req.SecondaryIPConfigs), len(svc.PodIPConfigState)) } - var expectedIPStatus cns.IPConfigState + var expectedIPStatus types.IPState // 0 is the default NMAgent version return from fake GetNetworkContainerInfoFromHost if containerStatus.CreateNetworkContainerRequest.Version > "0" { - expectedIPStatus = cns.PendingProgramming + expectedIPStatus = types.PendingProgramming } else { - expectedIPStatus = cns.Available + expectedIPStatus = types.Available } t.Logf("NC version in container status is %s, HostVersion is %s", containerStatus.CreateNetworkContainerRequest.Version, containerStatus.HostVersion) alreadyValidated := make(map[string]string) @@ -483,7 +483,7 @@ func validateNetworkRequest(t *testing.T, req cns.CreateNetworkContainerRequest) // Validate IP state if 
ipStatus.PodInfo != nil { if _, exists := svc.PodIPIDByPodInterfaceKey[ipStatus.PodInfo.Key()]; exists { - if ipStatus.State != cns.Allocated { + if ipStatus.State != types.Assigned { t.Fatalf("IPId: %s State is not Allocated, ipStatus: %+v", ipid, ipStatus) } } else { @@ -552,7 +552,7 @@ func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkCon ipId := svc.PodIPIDByPodInterfaceKey[podInfo.Key()] ipConfigstate := svc.PodIPConfigState[ipId] - if ipConfigstate.State != cns.Allocated { + if ipConfigstate.State != types.Assigned { t.Fatalf("IpAddress %s is not marked as allocated for Pod: %+v, ipState: %+v", ipaddress, podInfo, ipConfigstate) } @@ -582,7 +582,7 @@ func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkCon // Validate IP state if secIpConfigState, found := svc.PodIPConfigState[secIpId]; found { - if secIpConfigState.State != cns.Available { + if secIpConfigState.State != types.Available { t.Fatalf("IPId: %s State is not Available, ipStatus: %+v", secIpId, secIpConfigState) } } else { diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index a0d17a5187..1b1cf3a3d3 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -112,8 +112,8 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m defer service.Unlock() for uuid, existingIpConfig := range service.PodIPConfigState { - if existingIpConfig.State == cns.PendingProgramming { - updatedIpConfig, err := service.updateIPConfigState(uuid, cns.PendingRelease, existingIpConfig.PodInfo) + if existingIpConfig.State == types.PendingProgramming { + updatedIpConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) if err != nil { return nil, err } @@ -127,8 +127,8 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m // if not all expected IPs are set to PendingRelease, then check the Available IPs for uuid, existingIpConfig := range 
service.PodIPConfigState { - if existingIpConfig.State == cns.Available { - updatedIpConfig, err := service.updateIPConfigState(uuid, cns.PendingRelease, existingIpConfig.PodInfo) + if existingIpConfig.State == types.Available { + updatedIpConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) if err != nil { return nil, err } @@ -145,7 +145,7 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m return pendingReleasedIps, nil } -func (service *HTTPRestService) updateIPConfigState(ipID string, updatedState cns.IPConfigState, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { +func (service *HTTPRestService) updateIPConfigState(ipID string, updatedState types.IPState, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { if ipConfig, found := service.PodIPConfigState[ipID]; found { logger.Printf("[updateIPConfigState] Changing IpId [%s] state to [%s], podInfo [%+v]. Current config [%+v]", ipID, updatedState, podInfo, ipConfig) ipConfig.State = updatedState @@ -175,8 +175,8 @@ func (service *HTTPRestService) MarkIpsAsAvailableUntransacted(ncID string, newH for uuid, secondaryIPConfigs := range ncInfo.CreateNetworkContainerRequest.SecondaryIPConfigs { if ipConfigStatus, exist := service.PodIPConfigState[uuid]; !exist { logger.Errorf("IP %s with uuid as %s exist in service state Secondary IP list but can't find in PodIPConfigState", ipConfigStatus.IPAddress, uuid) - } else if ipConfigStatus.State == cns.PendingProgramming && secondaryIPConfigs.NCVersion <= newHostNCVersion { - _, err := service.updateIPConfigState(uuid, cns.Available, nil) + } else if ipConfigStatus.State == types.PendingProgramming && secondaryIPConfigs.NCVersion <= newHostNCVersion { + _, err := service.updateIPConfigState(uuid, types.Available, nil) if err != nil { logger.Errorf("Error updating IPConfig [%+v] state to Available, err: %+v", ipConfigStatus, err) } @@ -184,7 +184,7 @@ func (service *HTTPRestService) 
MarkIpsAsAvailableUntransacted(ncID string, newH // Following 2 sentence assign new host version to secondary ip config. secondaryIPConfigs.NCVersion = newHostNCVersion ncInfo.CreateNetworkContainerRequest.SecondaryIPConfigs[uuid] = secondaryIPConfigs - logger.Printf("Change ip %s with uuid %s from pending programming to %s, current secondary ip configs is %+v", ipConfigStatus.IPAddress, uuid, cns.Available, + logger.Printf("Change ip %s with uuid %s from pending programming to %s, current secondary ip configs is %+v", ipConfigStatus.IPAddress, uuid, types.Available, ncInfo.CreateNetworkContainerRequest.SecondaryIPConfigs[uuid]) } } @@ -252,7 +252,7 @@ func (service *HTTPRestService) handleDebugIPAddresses(w http.ResponseWriter, r func (service *HTTPRestService) GetAllocatedIPConfigs() []cns.IPConfigurationStatus { service.RLock() defer service.RUnlock() - return filter.MatchAnyIPConfigState(service.PodIPConfigState, filter.StateAllocated) + return filter.MatchAnyIPConfigState(service.PodIPConfigState, filter.StateAssigned) } // GetAvailableIPConfigs returns a filtered list of IPs which are in @@ -282,7 +282,7 @@ func (service *HTTPRestService) GetPendingReleaseIPConfigs() []cns.IPConfigurati // SetIPConfigAsAllocated takes a lock of the service, and sets the ipconfig in the CNS state as allocated. // Does not take a lock. 
func (service *HTTPRestService) setIPConfigAsAllocated(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) error { - ipconfig, err := service.updateIPConfigState(ipconfig.ID, cns.Allocated, podInfo) + ipconfig, err := service.updateIPConfigState(ipconfig.ID, types.Assigned, podInfo) if err != nil { return err } @@ -293,7 +293,7 @@ func (service *HTTPRestService) setIPConfigAsAllocated(ipconfig cns.IPConfigurat // SetIPConfigAsAllocated and sets the ipconfig in the CNS state as allocated, does not take a lock func (service *HTTPRestService) setIPConfigAsAvailable(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { - ipconfig, err := service.updateIPConfigState(ipconfig.ID, cns.Available, nil) + ipconfig, err := service.updateIPConfigState(ipconfig.ID, types.Available, nil) if err != nil { return cns.IPConfigurationStatus{}, err } @@ -340,12 +340,12 @@ func (service *HTTPRestService) MarkExistingIPsAsPending(pendingIPIDs []string) for _, id := range pendingIPIDs { if ipconfig, exists := service.PodIPConfigState[id]; exists { - if ipconfig.State == cns.Allocated { + if ipconfig.State == types.Assigned { return fmt.Errorf("Failed to mark IP [%v] as pending, currently allocated", id) } logger.Printf("[MarkExistingIPsAsPending]: Marking IP [%+v] to PendingRelease", ipconfig) - ipconfig.State = cns.PendingRelease + ipconfig.State = types.PendingRelease service.PodIPConfigState[id] = ipconfig } else { logger.Errorf("Inconsistent state, ipconfig with ID [%v] marked as pending release, but does not exist in state", id) @@ -385,7 +385,7 @@ func (service *HTTPRestService) AllocateDesiredIPConfig(podInfo cns.PodInfo, des found := false for _, ipConfig := range service.PodIPConfigState { if ipConfig.IPAddress == desiredIpAddress { - if ipConfig.State == cns.Allocated { + if ipConfig.State == types.Assigned { // This IP has already been allocated, if it is allocated to same pod, then return the same // IPconfiguration if 
ipConfig.PodInfo.Key() == podInfo.Key() { @@ -394,7 +394,7 @@ func (service *HTTPRestService) AllocateDesiredIPConfig(podInfo cns.PodInfo, des } else { return podIpInfo, fmt.Errorf("[AllocateDesiredIPConfig] Desired IP is already allocated %+v, requested for pod %+v", ipConfig, podInfo) } - } else if ipConfig.State == cns.Available || ipConfig.State == cns.PendingProgramming { + } else if ipConfig.State == types.Available || ipConfig.State == types.PendingProgramming { // This race can happen during restart, where CNS state is lost and thus we have lost the NC programmed version // As part of reconcile, we mark IPs as Allocated which are already allocated to PODs (listed from APIServer) if err := service.setIPConfigAsAllocated(ipConfig, podInfo); err != nil { @@ -419,7 +419,7 @@ func (service *HTTPRestService) AllocateAnyAvailableIPConfig(podInfo cns.PodInfo defer service.Unlock() for _, ipState := range service.PodIPConfigState { - if ipState.State == cns.Available { + if ipState.State == types.Available { if err := service.setIPConfigAsAllocated(ipState, podInfo); err != nil { return cns.PodIpInfo{}, err } diff --git a/cns/restserver/ipam_test.go b/cns/restserver/ipam_test.go index 4f922e5262..2d8fe18aec 100644 --- a/cns/restserver/ipam_test.go +++ b/cns/restserver/ipam_test.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/common" "github.com/Azure/azure-container-networking/cns/fakes" + "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" ) @@ -50,7 +51,7 @@ func newSecondaryIPConfig(ipAddress string, ncVersion int) cns.SecondaryIPConfig } } -func NewPodState(ipaddress string, prefixLength uint8, id, ncid string, state cns.IPConfigState, ncVersion int) cns.IPConfigurationStatus { +func NewPodState(ipaddress string, prefixLength uint8, id, ncid string, state types.IPState, ncVersion int) 
cns.IPConfigurationStatus { ipconfig := newSecondaryIPConfig(ipaddress, ncVersion) return cns.IPConfigurationStatus{ @@ -114,7 +115,7 @@ func requestIpAddressAndGetState(t *testing.T, req cns.IPConfigRequest) (cns.IPC return ipState, err } -func NewPodStateWithOrchestratorContext(ipaddress, id, ncid string, state cns.IPConfigState, prefixLength uint8, ncVersion int, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { +func NewPodStateWithOrchestratorContext(ipaddress, id, ncid string, state types.IPState, prefixLength uint8, ncVersion int, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { ipconfig := newSecondaryIPConfig(ipaddress, ncVersion) return cns.IPConfigurationStatus{ IPAddress: ipconfig.IPAddress, @@ -143,7 +144,7 @@ func UpdatePodIpConfigState(t *testing.T, svc *HTTPRestService, ipconfigs map[st // update ipconfigs to expected state for ipId, ipconfig := range ipconfigs { - if ipconfig.State == cns.Allocated { + if ipconfig.State == types.Assigned { svc.PodIPIDByPodInterfaceKey[ipconfig.PodInfo.Key()] = ipId svc.PodIPConfigState[ipId] = ipconfig } @@ -155,7 +156,7 @@ func UpdatePodIpConfigState(t *testing.T, svc *HTTPRestService, ipconfigs map[st func TestIPAMGetAvailableIPConfig(t *testing.T) { svc := getTestService() - testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Available, 0) + testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } @@ -173,7 +174,7 @@ func TestIPAMGetAvailableIPConfig(t *testing.T) { t.Fatal("Expected IP retrieval to be nil") } - desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Allocated, 0) + desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Assigned, 0) desiredState.PodInfo = testPod1Info if reflect.DeepEqual(desiredState, actualstate) != true { @@ -187,8 +188,8 @@ func TestIPAMGetNextAvailableIPConfig(t *testing.T) { // Add already allocated pod ip to 
state svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = testPod1GUID - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) - state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, cns.Available, 0) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) + state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -211,7 +212,7 @@ func TestIPAMGetNextAvailableIPConfig(t *testing.T) { t.Fatalf("Expected IP retrieval to be nil: %+v", err) } // want second available Pod IP State as first has been allocated - desiredState, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID, testNCID, cns.Allocated, 24, 0, testPod2Info) + desiredState, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID, testNCID, types.Assigned, 24, 0, testPod2Info) if reflect.DeepEqual(desiredState, actualstate) != true { t.Fatalf("Desired state not matching actual state, expected: %+v, actual: %+v", desiredState, actualstate) @@ -222,7 +223,7 @@ func TestIPAMGetAlreadyAllocatedIPConfigForSamePod(t *testing.T) { svc := getTestService() // Add Allocated Pod IP to state - testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } @@ -243,7 +244,7 @@ func TestIPAMGetAlreadyAllocatedIPConfigForSamePod(t *testing.T) { t.Fatalf("Expected not error: %+v", err) } - desiredState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + desiredState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) 
if reflect.DeepEqual(desiredState, actualstate) != true { t.Fatalf("Desired state not matching actual state, expected: %+v, actual: %+v", desiredState, actualstate) @@ -254,7 +255,7 @@ func TestIPAMAttemptToRequestIPNotFoundInPool(t *testing.T) { svc := getTestService() // Add Available Pod IP to state - testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Available, 0) + testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } @@ -282,7 +283,7 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T) { svc := getTestService() // Add Available Pod IP to state - testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Available, 0) + testState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } @@ -305,7 +306,7 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T) { t.Fatalf("Expected IP retrieval to be nil: %+v", err) } - desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Allocated, 0) + desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Assigned, 0) desiredState.PodInfo = testPod1Info if reflect.DeepEqual(desiredState, actualstate) != true { @@ -317,7 +318,7 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAllocatedSpecfiedIP(t *testing.T svc := getTestService() // set state as already allocated - testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, } @@ -345,8 +346,8 @@ func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) { svc := getTestService() // set state as already allocated - state1, _ := 
NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) - state2, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID, testNCID, cns.Allocated, 24, 0, testPod2Info) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) + state2, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID, testNCID, types.Assigned, 24, 0, testPod2Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -376,7 +377,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { svc := getTestService() // set state as already allocated - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, } @@ -422,7 +423,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { t.Fatalf("Expected IP retrieval to be nil: %+v", err) } - desiredState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + desiredState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) // want first available Pod IP State desiredState.IPAddress = desiredIpAddress desiredState.PodInfo = testPod2Info @@ -435,7 +436,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { func TestIPAMReleaseIPIdempotency(t *testing.T) { svc := getTestService() // set state as already allocated - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, } @@ 
-461,7 +462,7 @@ func TestIPAMReleaseIPIdempotency(t *testing.T) { func TestIPAMAllocateIPIdempotency(t *testing.T) { svc := getTestService() // set state as already allocated - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -481,9 +482,9 @@ func TestIPAMAllocateIPIdempotency(t *testing.T) { func TestAvailableIPConfigs(t *testing.T) { svc := getTestService() - state1 := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Available, 0) - state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, cns.Available, 0) - state3 := NewPodState(testIP3, 24, testPod3GUID, testNCID, cns.Available, 0) + state1 := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Available, 0) + state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, types.Available, 0) + state3 := NewPodState(testIP3, 24, testPod3GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -521,7 +522,7 @@ func TestAvailableIPConfigs(t *testing.T) { availableIps = svc.GetAvailableIPConfigs() validateIpState(t, availableIps, desiredAvailableIps) - desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, cns.Allocated, 0) + desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Assigned, 0) desiredState.PodInfo = testPod1Info desiredAllocatedIpConfigs[desiredState.ID] = desiredState allocatedIps = svc.GetAllocatedIPConfigs() @@ -552,7 +553,7 @@ func validateIpState(t *testing.T, actualIps []cns.IPConfigurationStatus, expect func TestIPAMMarkIPCountAsPending(t *testing.T) { svc := getTestService() // set state as already allocated - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Available, 24, 0, testPod1Info) + state1, _ := 
NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Available, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, } @@ -695,8 +696,8 @@ func TestIPAMMarkExistingIPConfigAsPending(t *testing.T) { // Add already allocated pod ip to state svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = testPod1GUID - state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, cns.Allocated, 24, 0, testPod1Info) - state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, cns.Available, 0) + state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) + state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, types.Available, 0) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, diff --git a/cns/restserver/util.go b/cns/restserver/util.go index c5835eeb98..74c0345408 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -225,7 +225,7 @@ func (service *HTTPRestService) updateIPConfigsStateUntransacted( ipConfigStatus, exists := service.PodIPConfigState[ipID] if exists { // pod ip exists, validate if state is not allocated, else fail - if ipConfigStatus.State == cns.Allocated { + if ipConfigStatus.State == types.Assigned { errMsg := fmt.Sprintf("Failed to delete an Allocated IP %v", ipConfigStatus) return types.InconsistentIPConfigState, errMsg } @@ -277,11 +277,11 @@ func (service *HTTPRestService) addIPConfigStateUntransacted(ncID string, hostVe ipID, ipconfig.IPAddress, ipconfig.NCVersion, hostVersion) // Using the updated NC version attached with IP to compare with latest nmagent version and determine IP statues. // When reconcile, service.PodIPConfigState doens't exist, rebuild it with the help of NC version attached with IP. 
- var newIPCNSStatus cns.IPConfigState + var newIPCNSStatus types.IPState if hostVersion < ipconfig.NCVersion { - newIPCNSStatus = cns.PendingProgramming + newIPCNSStatus = types.PendingProgramming } else { - newIPCNSStatus = cns.Available + newIPCNSStatus = types.Available } // add the new State ipconfigStatus := cns.IPConfigurationStatus{ @@ -321,7 +321,7 @@ func (service *HTTPRestService) removeToBeDeletedIPStateUntransacted( ipConfigStatus, exists := service.PodIPConfigState[ipID] if exists { // pod ip exists, validate if state is not allocated, else fail - if ipConfigStatus.State == cns.Allocated { + if ipConfigStatus.State == types.Assigned { errMsg := fmt.Sprintf("Failed to delete an Allocated IP %v", ipConfigStatus) return types.InconsistentIPConfigState, errMsg } diff --git a/cns/types/ipconfig.go b/cns/types/ipconfig.go new file mode 100644 index 0000000000..69541d6b84 --- /dev/null +++ b/cns/types/ipconfig.go @@ -0,0 +1,16 @@ +package types + +// IPState defines the possible states an IP can be in for CNS IPAM. +type IPState string + +const ( + // Available IPConfigState for IPs available for CNS to use. + // The total pool that CNS has access to are its "allocated" IPs. + Available IPState = "Available" + // Assigned IPConfigState for IPs that CNS has assigned to Pods. + Assigned IPState = "Assigned" + // PendingRelease IPConfigState for IPs pending release. + PendingRelease IPState = "PendingRelease" + // PendingProgramming IPConfigState for IPs pending programming. 
+ PendingProgramming IPState = "PendingProgramming" +) From b8125f939d562b25d340851a4c0000b997c8c65b Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Thu, 18 Nov 2021 16:09:28 -0600 Subject: [PATCH 2/7] build ip pool state and populate struct pre-reconcile Signed-off-by: Evan Baker --- cns/api.go | 2 +- cns/fakes/cnsfake.go | 2 +- cns/ipampool/metrics.go | 45 ++++++++++++----- cns/ipampool/monitor.go | 97 +++++++++++++++++++++---------------- cns/restserver/ipam.go | 6 +-- cns/restserver/ipam_test.go | 6 +-- 6 files changed, 96 insertions(+), 62 deletions(-) diff --git a/cns/api.go b/cns/api.go index 18ede3d88f..30cf812dec 100644 --- a/cns/api.go +++ b/cns/api.go @@ -42,7 +42,7 @@ type HTTPService interface { SyncNodeStatus(string, string, string, json.RawMessage) (types.ResponseCode, string) GetPendingProgramIPConfigs() []IPConfigurationStatus GetAvailableIPConfigs() []IPConfigurationStatus - GetAllocatedIPConfigs() []IPConfigurationStatus + GetAssignedIPConfigs() []IPConfigurationStatus GetPendingReleaseIPConfigs() []IPConfigurationStatus GetPodIPConfigState() map[string]IPConfigurationStatus MarkIPAsPendingRelease(numberToMark int) (map[string]IPConfigurationStatus, error) diff --git a/cns/fakes/cnsfake.go b/cns/fakes/cnsfake.go index 8570ea2059..d6604c36df 100644 --- a/cns/fakes/cnsfake.go +++ b/cns/fakes/cnsfake.go @@ -221,7 +221,7 @@ func (fake *HTTPServiceFake) GetAvailableIPConfigs() []cns.IPConfigurationStatus return ipconfigs } -func (fake *HTTPServiceFake) GetAllocatedIPConfigs() []cns.IPConfigurationStatus { +func (fake *HTTPServiceFake) GetAssignedIPConfigs() []cns.IPConfigurationStatus { ipconfigs := []cns.IPConfigurationStatus{} for _, ipconfig := range fake.IPStateManager.AllocatedIPConfigState { ipconfigs = append(ipconfigs, ipconfig) diff --git a/cns/ipampool/metrics.go b/cns/ipampool/metrics.go index 2089f55100..378c7439fb 100644 --- a/cns/ipampool/metrics.go +++ b/cns/ipampool/metrics.go @@ -9,7 +9,13 @@ var ( ipamAllocatedIPCount = 
prometheus.NewGauge( prometheus.GaugeOpts{ Name: "ipam_allocated_ips", - Help: "Allocated IP count.", + Help: "CNS allocated IP pool size.", + }, + ) + ipamAssignedIPCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "ipam_assigned_ips", + Help: "Assigned IP count.", }, ) ipamAvailableIPCount = prometheus.NewGauge( @@ -24,12 +30,6 @@ var ( Help: "IPAM IP pool batch size.", }, ) - ipamIPPool = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "ipam_ip_pool_size", - Help: "IP pool size.", - }, - ) ipamMaxIPCount = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "ipam_max_ips", @@ -54,10 +54,16 @@ var ( Help: "Requested IP count.", }, ) - ipamUnallocatedIPCount = prometheus.NewGauge( + ipamRequestedUnassignedIPConfigCount = prometheus.NewGauge( prometheus.GaugeOpts{ - Name: "ipam_unallocated_ips", - Help: "Unallocated IP count.", + Name: "ipam_requested_unassigned_ips", + Help: "Requested IP count.", + }, + ) + ipamUnassignedIPCount = prometheus.NewGauge( + prometheus.GaugeOpts{ + Name: "ipam_unassigned_ips", + Help: "Unassigned IP count.", }, ) ipamRequestedUnallocatedIPCount = prometheus.NewGauge( @@ -71,14 +77,27 @@ var ( func init() { metrics.Registry.MustRegister( ipamAllocatedIPCount, + ipamAssignedIPCount, ipamAvailableIPCount, ipamBatchSize, - ipamIPPool, ipamMaxIPCount, ipamPendingProgramIPCount, ipamPendingReleaseIPCount, ipamRequestedIPConfigCount, - ipamRequestedUnallocatedIPCount, - ipamUnallocatedIPCount, + ipamRequestedUnassignedIPConfigCount, + ipamUnassignedIPCount, ) } + +func observeIPPoolState(state ipPoolState) { + ipamAllocatedIPCount.Set(float64(state.allocated)) + ipamAssignedIPCount.Set(float64(state.assigned)) + ipamAvailableIPCount.Set(float64(state.available)) + ipamBatchSize.Set(float64(state.batch)) + ipamMaxIPCount.Set(float64(state.max)) + ipamPendingProgramIPCount.Set(float64(state.pendingProgramming)) + ipamPendingReleaseIPCount.Set(float64(state.pendingRelease)) + ipamRequestedIPConfigCount.Set(float64(state.requested)) 
+ ipamRequestedUnassignedIPConfigCount.Set(float64(state.requestedUnassigned)) + ipamUnassignedIPCount.Set(float64(state.unassigned)) +} diff --git a/cns/ipampool/monitor.go b/cns/ipampool/monitor.go index b95efecd77..e7fd39707c 100644 --- a/cns/ipampool/monitor.go +++ b/cns/ipampool/monitor.go @@ -2,13 +2,13 @@ package ipampool import ( "context" - "fmt" "sync" "time" "github.com/Azure/azure-container-networking/cns" "github.com/Azure/azure-container-networking/cns/logger" "github.com/Azure/azure-container-networking/cns/metric" + "github.com/Azure/azure-container-networking/cns/types" "github.com/Azure/azure-container-networking/crd/nodenetworkconfig/api/v1alpha" "github.com/pkg/errors" ) @@ -97,57 +97,74 @@ func (pm *Monitor) Start(ctx context.Context) error { } } -func (pm *Monitor) reconcile(ctx context.Context) error { - cnsPodIPConfigCount := len(pm.httpService.GetPodIPConfigState()) - pendingProgramCount := len(pm.httpService.GetPendingProgramIPConfigs()) // TODO: add pending program count to real cns - allocatedPodIPCount := len(pm.httpService.GetAllocatedIPConfigs()) - pendingReleaseIPCount := len(pm.httpService.GetPendingReleaseIPConfigs()) - availableIPConfigCount := len(pm.httpService.GetAvailableIPConfigs()) // TODO: add pending allocation count to real cns - requestedIPConfigCount := int(pm.spec.RequestedIPCount) - unallocated := cnsPodIPConfigCount - allocatedPodIPCount - requestedUnallocated := requestedIPConfigCount - allocatedPodIPCount - batchSize := pm.scaler.BatchSize - maxIPCount := pm.scaler.MaxIPCount +type ipPoolState struct { + allocated int + assigned int + available int + batch int + max int + pendingProgramming int + pendingRelease int + requested int + requestedUnassigned int + unassigned int +} - msg := fmt.Sprintf("[ipam-pool-monitor] Pool Size: %d, Goal Size: %d, BatchSize: %d, MaxIPCount: %d, Allocated: %d, Available: %d, Pending Release: %d, Unallocated: %d, Requested Unallocated: %d Pending Program: %d", //nolint:lll // ignore - 
cnsPodIPConfigCount, pm.spec.RequestedIPCount, batchSize, maxIPCount, allocatedPodIPCount, availableIPConfigCount, pendingReleaseIPCount, unallocated, requestedUnallocated, pendingProgramCount) +func buildIPPoolState(ips map[string]cns.IPConfigurationStatus, spec v1alpha.NodeNetworkConfigSpec, scaler v1alpha.Scaler) ipPoolState { + state := ipPoolState{ + allocated: len(ips), + batch: int(scaler.BatchSize), + requested: int(spec.RequestedIPCount), + max: int(scaler.MaxIPCount), + } + for _, v := range ips { + switch v.State { + case types.Assigned: + state.assigned++ + case types.Available: + state.available++ + case types.PendingProgramming: + state.pendingProgramming++ + case types.PendingRelease: + state.pendingRelease++ + } + } + state.unassigned = state.allocated - state.assigned + state.requestedUnassigned = state.requested - state.assigned + return state +} - ipamAllocatedIPCount.Set(float64(allocatedPodIPCount)) - ipamAvailableIPCount.Set(float64(availableIPConfigCount)) - ipamBatchSize.Set(float64(batchSize)) - ipamIPPool.Set(float64(cnsPodIPConfigCount)) - ipamMaxIPCount.Set(float64(maxIPCount)) - ipamPendingProgramIPCount.Set(float64(pendingProgramCount)) - ipamPendingReleaseIPCount.Set(float64(pendingReleaseIPCount)) - ipamRequestedIPConfigCount.Set(float64(requestedIPConfigCount)) - ipamUnallocatedIPCount.Set(float64(unallocated)) - ipamRequestedUnallocatedIPCount.Set(float64(requestedUnallocated)) +func (pm *Monitor) reconcile(ctx context.Context) error { + allocatedIPs := pm.httpService.GetPodIPConfigState() + state := buildIPPoolState(allocatedIPs, pm.spec, pm.scaler) + logger.Printf("ipam-pool-monitor state %#v", state) + observeIPPoolState(state) switch { // pod count is increasing - case requestedUnallocated < pm.state.minFreeCount: - if pm.spec.RequestedIPCount == maxIPCount { + case state.requestedUnassigned < pm.state.minFreeCount: + if state.requested == state.max { // If we're already at the maxIPCount, don't try to increase return nil } - 
logger.Printf("[ipam-pool-monitor] Increasing pool size...%s ", msg) + logger.Printf("[ipam-pool-monitor] Increasing pool size...") return pm.increasePoolSize(ctx) // pod count is decreasing - case unallocated >= pm.state.maxFreeCount: - logger.Printf("[ipam-pool-monitor] Decreasing pool size...%s ", msg) - return pm.decreasePoolSize(ctx, pendingReleaseIPCount) + case state.unassigned >= pm.state.maxFreeCount: + logger.Printf("[ipam-pool-monitor] Decreasing pool size...") + return pm.decreasePoolSize(ctx, state.pendingRelease) // CRD has reconciled CNS state, and target spec is now the same size as the state - // free to remove the IPs from the CRD - case len(pm.spec.IPsNotInUse) != pendingReleaseIPCount: - logger.Printf("[ipam-pool-monitor] Removing Pending Release IPs from CRD...%s ", msg) + // free to remove the IP's from the CRD + case len(pm.spec.IPsNotInUse) != state.pendingRelease: + logger.Printf("[ipam-pool-monitor] Removing Pending Release IPs from CRD...") return pm.cleanPendingRelease(ctx) // no pods scheduled - case allocatedPodIPCount == 0: - logger.Printf("[ipam-pool-monitor] No pods scheduled, %s", msg) + case state.assigned == 0: + logger.Printf("[ipam-pool-monitor] No pods scheduled") return nil } @@ -176,7 +193,7 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error { } logger.Printf("[ipam-pool-monitor] Increasing pool size, Current Pool Size: %v, Updated Requested IP Count: %v, Pods with IPs:%v, ToBeDeleted Count: %v", - len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) + len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAssignedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) if _, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec); err != nil { // caller will retry to update the CRD again @@ -219,12 +236,10 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI 
logger.Printf("[ipam-pool-monitor] updatedRequestedIPCount %v", updatedRequestedIPCount) - if pm.state.notInUseCount == 0 || - pm.state.notInUseCount < existingPendingReleaseIPCount { + if pm.state.notInUseCount == 0 || pm.state.notInUseCount < existingPendingReleaseIPCount { logger.Printf("[ipam-pool-monitor] Marking IPs as PendingRelease, ipsToBeReleasedCount %d", int(decreaseIPCountBy)) var err error - pendingIPAddresses, err = pm.httpService.MarkIPAsPendingRelease(int(decreaseIPCountBy)) - if err != nil { + if pendingIPAddresses, err = pm.httpService.MarkIPAsPendingRelease(int(decreaseIPCountBy)); err != nil { return err } @@ -243,7 +258,7 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI tempNNCSpec.RequestedIPCount -= int64(len(pendingIPAddresses)) logger.Printf("[ipam-pool-monitor] Decreasing pool size, Current Pool Size: %v, Requested IP Count: %v, Pods with IPs: %v, ToBeDeleted Count: %v", - len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) + len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAssignedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) _, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec) if err != nil { diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index 1b1cf3a3d3..09e900e69e 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -247,9 +247,9 @@ func (service *HTTPRestService) handleDebugIPAddresses(w http.ResponseWriter, r logger.ResponseEx(service.Name, req, resp, resp.Response.ReturnCode, err) } -// GetAllocatedIPConfigs returns a filtered list of IPs which are in -// Allocated State. -func (service *HTTPRestService) GetAllocatedIPConfigs() []cns.IPConfigurationStatus { +// GetAssignedIPConfigs returns a filtered list of IPs which are in +// Assigned State. 
+func (service *HTTPRestService) GetAssignedIPConfigs() []cns.IPConfigurationStatus { service.RLock() defer service.RUnlock() return filter.MatchAnyIPConfigState(service.PodIPConfigState, filter.StateAssigned) diff --git a/cns/restserver/ipam_test.go b/cns/restserver/ipam_test.go index 2d8fe18aec..0d58de5617 100644 --- a/cns/restserver/ipam_test.go +++ b/cns/restserver/ipam_test.go @@ -502,7 +502,7 @@ func TestAvailableIPConfigs(t *testing.T) { validateIpState(t, availableIps, desiredAvailableIps) desiredAllocatedIpConfigs := make(map[string]cns.IPConfigurationStatus) - allocatedIps := svc.GetAllocatedIPConfigs() + allocatedIps := svc.GetAssignedIPConfigs() validateIpState(t, allocatedIps, desiredAllocatedIpConfigs) req := cns.IPConfigRequest{ @@ -525,7 +525,7 @@ func TestAvailableIPConfigs(t *testing.T) { desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Assigned, 0) desiredState.PodInfo = testPod1Info desiredAllocatedIpConfigs[desiredState.ID] = desiredState - allocatedIps = svc.GetAllocatedIPConfigs() + allocatedIps = svc.GetAssignedIPConfigs() validateIpState(t, allocatedIps, desiredAllocatedIpConfigs) } @@ -727,7 +727,7 @@ func TestIPAMMarkExistingIPConfigAsPending(t *testing.T) { t.Fatalf("Expected to fail when marking allocated ip as pending") } - allocatedIPConfigs := svc.GetAllocatedIPConfigs() + allocatedIPConfigs := svc.GetAssignedIPConfigs() if allocatedIPConfigs[0].ID != testPod1GUID { t.Fatalf("Expected to see ID %v in pending release ipconfigs, actual %+v", testPod1GUID, allocatedIPConfigs) } From 1bef8b924aabe2ee40de0b35dce7a924ef66cb4e Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Tue, 23 Nov 2021 13:43:33 -0600 Subject: [PATCH 3/7] upcast to int64 and move scaler fields to metastate Signed-off-by: Evan Baker --- cns/api.go | 6 +- cns/fakes/monitor.go | 6 +- cns/ipampool/metrics.go | 6 +- cns/ipampool/monitor.go | 108 ++++++++++++++++++----------------- cns/ipampool/monitor_test.go | 5 +- 5 files changed, 68 insertions(+), 63 
deletions(-) diff --git a/cns/api.go b/cns/api.go index 30cf812dec..998e679c49 100644 --- a/cns/api.go +++ b/cns/api.go @@ -221,9 +221,9 @@ type IPAMPoolMonitor interface { // IpamPoolMonitorStateSnapshot struct to expose state values for IPAMPoolMonitor struct type IpamPoolMonitorStateSnapshot struct { - MinimumFreeIps int - MaximumFreeIps int - UpdatingIpsNotInUseCount int + MinimumFreeIps int64 + MaximumFreeIps int64 + UpdatingIpsNotInUseCount int64 CachedNNC v1alpha.NodeNetworkConfig } diff --git a/cns/fakes/monitor.go b/cns/fakes/monitor.go index d9087f20b5..c41f1c2f77 100644 --- a/cns/fakes/monitor.go +++ b/cns/fakes/monitor.go @@ -11,7 +11,7 @@ import ( ) type MonitorFake struct { - IPsNotInUseCount int + IPsNotInUseCount int64 NodeNetworkConfig *v1alpha.NodeNetworkConfig } @@ -29,8 +29,8 @@ func (*MonitorFake) Reconcile() error { func (f *MonitorFake) GetStateSnapshot() cns.IpamPoolMonitorStateSnapshot { return cns.IpamPoolMonitorStateSnapshot{ - MaximumFreeIps: int(float64(f.NodeNetworkConfig.Status.Scaler.BatchSize) * (float64(f.NodeNetworkConfig.Status.Scaler.ReleaseThresholdPercent) / 100)), //nolint:gomnd // it's a percent - MinimumFreeIps: int(float64(f.NodeNetworkConfig.Status.Scaler.BatchSize) * (float64(f.NodeNetworkConfig.Status.Scaler.RequestThresholdPercent) / 100)), //nolint:gomnd // it's a percent + MaximumFreeIps: int64(float64(f.NodeNetworkConfig.Status.Scaler.BatchSize) * (float64(f.NodeNetworkConfig.Status.Scaler.ReleaseThresholdPercent) / 100)), //nolint:gomnd // it's a percent + MinimumFreeIps: int64(float64(f.NodeNetworkConfig.Status.Scaler.BatchSize) * (float64(f.NodeNetworkConfig.Status.Scaler.RequestThresholdPercent) / 100)), //nolint:gomnd // it's a percent UpdatingIpsNotInUseCount: f.IPsNotInUseCount, CachedNNC: *f.NodeNetworkConfig, } diff --git a/cns/ipampool/metrics.go b/cns/ipampool/metrics.go index 378c7439fb..9299e8f2e1 100644 --- a/cns/ipampool/metrics.go +++ b/cns/ipampool/metrics.go @@ -89,12 +89,12 @@ func init() { ) } 
-func observeIPPoolState(state ipPoolState) { +func observeIPPoolState(state ipPoolState, meta metaState) { ipamAllocatedIPCount.Set(float64(state.allocated)) ipamAssignedIPCount.Set(float64(state.assigned)) ipamAvailableIPCount.Set(float64(state.available)) - ipamBatchSize.Set(float64(state.batch)) - ipamMaxIPCount.Set(float64(state.max)) + ipamBatchSize.Set(float64(meta.batch)) + ipamMaxIPCount.Set(float64(meta.max)) ipamPendingProgramIPCount.Set(float64(state.pendingProgramming)) ipamPendingReleaseIPCount.Set(float64(state.pendingRelease)) ipamRequestedIPConfigCount.Set(float64(state.requested)) diff --git a/cns/ipampool/monitor.go b/cns/ipampool/monitor.go index e7fd39707c..0a7dfd5f4f 100644 --- a/cns/ipampool/monitor.go +++ b/cns/ipampool/monitor.go @@ -24,11 +24,13 @@ type nodeNetworkConfigSpecUpdater interface { UpdateSpec(context.Context, *v1alpha.NodeNetworkConfigSpec) (*v1alpha.NodeNetworkConfig, error) } -// poolState is the Monitor's view of the IP pool. -type poolState struct { - minFreeCount int - maxFreeCount int - notInUseCount int +// metaState is the Monitor's configuration state for the IP pool. +type metaState struct { + batch int64 + max int64 + maxFreeCount int64 + minFreeCount int64 + notInUseCount int64 } type Options struct { @@ -39,8 +41,7 @@ type Options struct { type Monitor struct { opts *Options spec v1alpha.NodeNetworkConfigSpec - scaler v1alpha.Scaler - state poolState + metastate metaState nnccli nodeNetworkConfigSpecUpdater httpService cns.HTTPService initialized chan interface{} @@ -85,8 +86,10 @@ func (pm *Monitor) Start(ctx context.Context) error { } case nnc := <-pm.nncSource: // received a new NodeNetworkConfig, extract the data from it and re-reconcile. 
pm.spec = nnc.Spec - pm.scaler = nnc.Status.Scaler - pm.state.minFreeCount, pm.state.maxFreeCount = CalculateMinFreeIPs(pm.scaler), CalculateMaxFreeIPs(pm.scaler) + scaler := nnc.Status.Scaler + pm.metastate.batch = scaler.BatchSize + pm.metastate.max = scaler.MaxIPCount + pm.metastate.minFreeCount, pm.metastate.maxFreeCount = CalculateMinFreeIPs(scaler), CalculateMaxFreeIPs(scaler) pm.once.Do(func() { close(pm.initialized) }) // close the init channel the first time we receive a NodeNetworkConfig. } // if control has flowed through the select(s) to this point, we can now reconcile. @@ -97,25 +100,30 @@ func (pm *Monitor) Start(ctx context.Context) error { } } +// ipPoolState is the current actual state of the CNS IP pool. type ipPoolState struct { - allocated int - assigned int - available int - batch int - max int - pendingProgramming int - pendingRelease int - requested int - requestedUnassigned int - unassigned int + // allocated are the IPs given to CNS. + allocated int64 + // assigned are the IPs CNS gives to Pods. + assigned int64 + // available are the allocated IPs in state "Available". + available int64 + // pendingProgramming are the allocated IPs in state "PendingProgramming". + pendingProgramming int64 + // pendingRelease are the allocated IPs in state "PendingRelease". + pendingRelease int64 + // requested are the IPs CNS has requested that it be allocated. + requested int64 + // requestedUnassigned are the "future" unassigned IPs, if the requested IP count is honored: requested - assigned. + requestedUnassigned int64 + // unassigned are the currently unassigned IPs: allocated - assigned. 
+ unassigned int64 } -func buildIPPoolState(ips map[string]cns.IPConfigurationStatus, spec v1alpha.NodeNetworkConfigSpec, scaler v1alpha.Scaler) ipPoolState { +func buildIPPoolState(ips map[string]cns.IPConfigurationStatus, spec v1alpha.NodeNetworkConfigSpec) ipPoolState { state := ipPoolState{ - allocated: len(ips), - batch: int(scaler.BatchSize), - requested: int(spec.RequestedIPCount), - max: int(scaler.MaxIPCount), + allocated: int64(len(ips)), + requested: spec.RequestedIPCount, } for _, v := range ips { switch v.State { @@ -136,14 +144,14 @@ func buildIPPoolState(ips map[string]cns.IPConfigurationStatus, spec v1alpha.Nod func (pm *Monitor) reconcile(ctx context.Context) error { allocatedIPs := pm.httpService.GetPodIPConfigState() - state := buildIPPoolState(allocatedIPs, pm.spec, pm.scaler) + state := buildIPPoolState(allocatedIPs, pm.spec) logger.Printf("ipam-pool-monitor state %#v", state) - observeIPPoolState(state) + observeIPPoolState(state, pm.metastate) switch { // pod count is increasing - case state.requestedUnassigned < pm.state.minFreeCount: - if state.requested == state.max { + case state.requestedUnassigned < pm.metastate.minFreeCount: + if state.requested == pm.metastate.max { // If we're already at the maxIPCount, don't try to increase return nil } @@ -152,13 +160,13 @@ func (pm *Monitor) reconcile(ctx context.Context) error { return pm.increasePoolSize(ctx) // pod count is decreasing - case state.unassigned >= pm.state.maxFreeCount: + case state.unassigned >= pm.metastate.maxFreeCount: logger.Printf("[ipam-pool-monitor] Decreasing pool size...") return pm.decreasePoolSize(ctx, state.pendingRelease) // CRD has reconciled CNS state, and target spec is now the same size as the state // free to remove the IP's from the CRD - case len(pm.spec.IPsNotInUse) != state.pendingRelease: + case int64(len(pm.spec.IPsNotInUse)) != state.pendingRelease: logger.Printf("[ipam-pool-monitor] Removing Pending Release IPs from CRD...") return 
pm.cleanPendingRelease(ctx) @@ -175,15 +183,14 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error { tempNNCSpec := pm.createNNCSpecForCRD() // Query the max IP count - maxIPCount := pm.scaler.MaxIPCount previouslyRequestedIPCount := tempNNCSpec.RequestedIPCount - batchSize := pm.scaler.BatchSize + batchSize := pm.metastate.batch - tempNNCSpec.RequestedIPCount += batchSize - if tempNNCSpec.RequestedIPCount > maxIPCount { + tempNNCSpec.RequestedIPCount += int64(batchSize) + if tempNNCSpec.RequestedIPCount > int64(pm.metastate.max) { // We don't want to ask for more ips than the max - logger.Printf("[ipam-pool-monitor] Requested IP count (%v) is over max limit (%v), requesting max limit instead.", tempNNCSpec.RequestedIPCount, maxIPCount) - tempNNCSpec.RequestedIPCount = maxIPCount + logger.Printf("[ipam-pool-monitor] Requested IP count (%v) is over max limit (%v), requesting max limit instead.", tempNNCSpec.RequestedIPCount, pm.metastate.max) + tempNNCSpec.RequestedIPCount = int64(pm.metastate.max) } // If the requested IP count is same as before, then don't do anything @@ -208,15 +215,15 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error { return nil } -func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseIPCount int) error { - // mark n number of IPs as pending +func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseIPCount int64) error { + // mark n number of IP's as pending var newIpsMarkedAsPending bool var pendingIPAddresses map[string]cns.IPConfigurationStatus var updatedRequestedIPCount int64 // Ensure the updated requested IP count is a multiple of the batch size previouslyRequestedIPCount := pm.spec.RequestedIPCount - batchSize := pm.scaler.BatchSize + batchSize := pm.metastate.batch modResult := previouslyRequestedIPCount % batchSize logger.Printf("[ipam-pool-monitor] Previously RequestedIP Count %v", previouslyRequestedIPCount) @@ -236,7 +243,7 @@ func (pm *Monitor) 
decreasePoolSize(ctx context.Context, existingPendingReleaseI logger.Printf("[ipam-pool-monitor] updatedRequestedIPCount %v", updatedRequestedIPCount) - if pm.state.notInUseCount == 0 || pm.state.notInUseCount < existingPendingReleaseIPCount { + if pm.metastate.notInUseCount == 0 || pm.metastate.notInUseCount < existingPendingReleaseIPCount { logger.Printf("[ipam-pool-monitor] Marking IPs as PendingRelease, ipsToBeReleasedCount %d", int(decreaseIPCountBy)) var err error if pendingIPAddresses, err = pm.httpService.MarkIPAsPendingRelease(int(decreaseIPCountBy)); err != nil { @@ -250,11 +257,11 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI if newIpsMarkedAsPending { // cache the updatingPendingRelease so that we dont re-set new IPs to PendingRelease in case UpdateCRD call fails - pm.state.notInUseCount = len(tempNNCSpec.IPsNotInUse) + pm.metastate.notInUseCount = int64(len(tempNNCSpec.IPsNotInUse)) } logger.Printf("[ipam-pool-monitor] Releasing IPCount in this batch %d, updatingPendingIpsNotInUse count %d", - len(pendingIPAddresses), pm.state.notInUseCount) + len(pendingIPAddresses), pm.metastate.notInUseCount) tempNNCSpec.RequestedIPCount -= int64(len(pendingIPAddresses)) logger.Printf("[ipam-pool-monitor] Decreasing pool size, Current Pool Size: %v, Requested IP Count: %v, Pods with IPs: %v, ToBeDeleted Count: %v", @@ -274,8 +281,8 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI pm.spec = tempNNCSpec // clear the updatingPendingIpsNotInUse, as we have Updated the CRD - logger.Printf("[ipam-pool-monitor] cleaning the updatingPendingIpsNotInUse, existing length %d", pm.state.notInUseCount) - pm.state.notInUseCount = 0 + logger.Printf("[ipam-pool-monitor] cleaning the updatingPendingIpsNotInUse, existing length %d", pm.metastate.notInUseCount) + pm.metastate.notInUseCount = 0 return nil } @@ -316,16 +323,13 @@ func (pm *Monitor) createNNCSpecForCRD() v1alpha.NodeNetworkConfigSpec { // 
GetStateSnapshot gets a snapshot of the IPAMPoolMonitor struct. func (pm *Monitor) GetStateSnapshot() cns.IpamPoolMonitorStateSnapshot { - scaler, spec, state := pm.scaler, pm.spec, pm.state + spec, state := pm.spec, pm.metastate return cns.IpamPoolMonitorStateSnapshot{ MinimumFreeIps: state.minFreeCount, MaximumFreeIps: state.maxFreeCount, UpdatingIpsNotInUseCount: state.notInUseCount, CachedNNC: v1alpha.NodeNetworkConfig{ Spec: spec, - Status: v1alpha.NodeNetworkConfigStatus{ - Scaler: scaler, - }, }, } } @@ -372,13 +376,13 @@ func (pm *Monitor) clampScaler(scaler *v1alpha.Scaler) { // CalculateMinFreeIPs calculates the minimum free IP quantity based on the Scaler // in the passed NodeNetworkConfig. //nolint:gocritic // ignore hugeparam -func CalculateMinFreeIPs(scaler v1alpha.Scaler) int { - return int(float64(scaler.BatchSize) * (float64(scaler.RequestThresholdPercent) / 100)) //nolint:gomnd // it's a percent +func CalculateMinFreeIPs(scaler v1alpha.Scaler) int64 { + return int64(float64(scaler.BatchSize) * (float64(scaler.RequestThresholdPercent) / 100)) //nolint:gomnd // it's a percent } // CalculateMaxFreeIPs calculates the maximum free IP quantity based on the Scaler // in the passed NodeNetworkConfig. 
//nolint:gocritic // ignore hugeparam -func CalculateMaxFreeIPs(scaler v1alpha.Scaler) int { - return int(float64(scaler.BatchSize) * (float64(scaler.ReleaseThresholdPercent) / 100)) //nolint:gomnd // it's a percent +func CalculateMaxFreeIPs(scaler v1alpha.Scaler) int64 { + return int64(float64(scaler.BatchSize) * (float64(scaler.ReleaseThresholdPercent) / 100)) //nolint:gomnd // it's a percent } diff --git a/cns/ipampool/monitor_test.go b/cns/ipampool/monitor_test.go index c0a4b92600..9939140785 100644 --- a/cns/ipampool/monitor_test.go +++ b/cns/ipampool/monitor_test.go @@ -27,8 +27,9 @@ type directUpdatePoolMonitor struct { } func (d *directUpdatePoolMonitor) Update(nnc *v1alpha.NodeNetworkConfig) { - d.m.scaler, d.m.spec = nnc.Status.Scaler, nnc.Spec - d.m.state.minFreeCount, d.m.state.maxFreeCount = CalculateMinFreeIPs(d.m.scaler), CalculateMaxFreeIPs(d.m.scaler) + scaler := nnc.Status.Scaler + d.m.spec = nnc.Spec + d.m.metastate.minFreeCount, d.m.metastate.maxFreeCount = CalculateMinFreeIPs(scaler), CalculateMaxFreeIPs(scaler) } type state struct { From aad714da797aa78584a7633ac92dd36a0e353bf4 Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Tue, 23 Nov 2021 14:10:42 -0600 Subject: [PATCH 4/7] delint Signed-off-by: Evan Baker --- cns/cmd/cli/cli.go | 13 +++---------- cns/restserver/ipam.go | 25 ++++++++++--------------- 2 files changed, 13 insertions(+), 25 deletions(-) diff --git a/cns/cmd/cli/cli.go b/cns/cmd/cli/cli.go index ce60ba16ec..0caa414286 100644 --- a/cns/cmd/cli/cli.go +++ b/cns/cmd/cli/cli.go @@ -47,21 +47,14 @@ func getCmd(ctx context.Context, client *client.Client, arg string) error { switch types.IPState(arg) { case types.Available: states = append(states, types.Available) - case types.Assigned: states = append(states, types.Assigned) - - case types.PendingRelease: - states = append(states, types.PendingRelease) - case types.PendingProgramming: states = append(states, types.PendingProgramming) - - default: - states = append(states, 
types.Assigned) - states = append(states, types.Available) + case types.PendingRelease: states = append(states, types.PendingRelease) - states = append(states, types.PendingProgramming) + default: + states = append(states, types.Assigned, types.Available, types.PendingProgramming, types.PendingRelease) } addr, err := client.GetIPAddressesMatchingStates(ctx, states...) diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index 09e900e69e..5772960c8a 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -113,12 +113,12 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m for uuid, existingIpConfig := range service.PodIPConfigState { if existingIpConfig.State == types.PendingProgramming { - updatedIpConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) + updatedIPConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) if err != nil { return nil, err } - pendingReleasedIps[uuid] = updatedIpConfig + pendingReleasedIps[uuid] = updatedIPConfig if len(pendingReleasedIps) == totalIpsToRelease { return pendingReleasedIps, nil } @@ -128,12 +128,12 @@ func (service *HTTPRestService) MarkIPAsPendingRelease(totalIpsToRelease int) (m // if not all expected IPs are set to PendingRelease, then check the Available IPs for uuid, existingIpConfig := range service.PodIPConfigState { if existingIpConfig.State == types.Available { - updatedIpConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) + updatedIPConfig, err := service.updateIPConfigState(uuid, types.PendingRelease, existingIpConfig.PodInfo) if err != nil { return nil, err } - pendingReleasedIps[uuid] = updatedIpConfig + pendingReleasedIps[uuid] = updatedIPConfig if len(pendingReleasedIps) == totalIpsToRelease { return pendingReleasedIps, nil @@ -382,33 +382,28 @@ func (service *HTTPRestService) AllocateDesiredIPConfig(podInfo cns.PodInfo, des 
service.Lock() defer service.Unlock() - found := false for _, ipConfig := range service.PodIPConfigState { if ipConfig.IPAddress == desiredIpAddress { - if ipConfig.State == types.Assigned { + switch ipConfig.State { //nolint:exhaustive // ignoring PendingRelease case intentionally + case types.Assigned: // This IP has already been allocated, if it is allocated to same pod, then return the same // IPconfiguration if ipConfig.PodInfo.Key() == podInfo.Key() { logger.Printf("[AllocateDesiredIPConfig]: IP Config [%+v] is already allocated to this Pod [%+v]", ipConfig, podInfo) - found = true } else { return podIpInfo, fmt.Errorf("[AllocateDesiredIPConfig] Desired IP is already allocated %+v, requested for pod %+v", ipConfig, podInfo) } - } else if ipConfig.State == types.Available || ipConfig.State == types.PendingProgramming { + case types.Available, types.PendingProgramming: // This race can happen during restart, where CNS state is lost and thus we have lost the NC programmed version // As part of reconcile, we mark IPs as Allocated which are already allocated to PODs (listed from APIServer) if err := service.setIPConfigAsAllocated(ipConfig, podInfo); err != nil { return podIpInfo, err } - found = true - } else { + default: return podIpInfo, fmt.Errorf("[AllocateDesiredIPConfig] Desired IP is not available %+v", ipConfig) } - - if found { - err := service.populateIPConfigInfoUntransacted(ipConfig, &podIpInfo) - return podIpInfo, err - } + err := service.populateIPConfigInfoUntransacted(ipConfig, &podIpInfo) + return podIpInfo, err } } return podIpInfo, fmt.Errorf("Requested IP not found in pool") From db8d9efca032aacf150dc3a09abc4985ad309a22 Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Tue, 23 Nov 2021 17:47:03 -0600 Subject: [PATCH 5/7] fix tests Signed-off-by: Evan Baker --- cns/client/client_test.go | 2 +- cns/fakes/cnsfake.go | 34 +++++++++++---------- cns/ipampool/monitor.go | 46 ++++++++++++++--------------- cns/ipampool/monitor_test.go | 57 
++++++++++++++++++++---------------- cns/metric/pool.go | 16 +++++----- 5 files changed, 80 insertions(+), 75 deletions(-) diff --git a/cns/client/client_test.go b/cns/client/client_test.go index 6479f0342d..45bcfee1bc 100644 --- a/cns/client/client_test.go +++ b/cns/client/client_test.go @@ -364,7 +364,7 @@ func TestCNSClientDebugAPI(t *testing.T) { testIpamPoolMonitor := inmemory.HTTPRestServiceData.IPAMPoolMonitor assert.EqualValues(t, 5, testIpamPoolMonitor.MinimumFreeIps, "IPAMPoolMonitor state is not reflecting the initial set values") assert.EqualValues(t, 15, testIpamPoolMonitor.MaximumFreeIps, "IPAMPoolMonitor state is not reflecting the initial set values") - assert.Equal(t, 13, testIpamPoolMonitor.UpdatingIpsNotInUseCount, "IPAMPoolMonitor state is not reflecting the initial set values") + assert.EqualValues(t, 13, testIpamPoolMonitor.UpdatingIpsNotInUseCount, "IPAMPoolMonitor state is not reflecting the initial set values") // check for cached NNC Spec struct values assert.EqualValues(t, 16, testIpamPoolMonitor.CachedNNC.Spec.RequestedIPCount, "IPAMPoolMonitor cached NNC Spec is not reflecting the initial set values") diff --git a/cns/fakes/cnsfake.go b/cns/fakes/cnsfake.go index d6604c36df..c95e16ec3c 100644 --- a/cns/fakes/cnsfake.go +++ b/cns/fakes/cnsfake.go @@ -48,7 +48,7 @@ func (stack *StringStack) Pop() (string, error) { type IPStateManager struct { PendingProgramIPConfigState map[string]cns.IPConfigurationStatus AvailableIPConfigState map[string]cns.IPConfigurationStatus - AllocatedIPConfigState map[string]cns.IPConfigurationStatus + AssignedIPConfigState map[string]cns.IPConfigurationStatus PendingReleaseIPConfigState map[string]cns.IPConfigurationStatus AvailableIPIDStack StringStack sync.RWMutex @@ -58,7 +58,7 @@ func NewIPStateManager() IPStateManager { return IPStateManager{ PendingProgramIPConfigState: make(map[string]cns.IPConfigurationStatus), AvailableIPConfigState: make(map[string]cns.IPConfigurationStatus), - AllocatedIPConfigState: 
make(map[string]cns.IPConfigurationStatus), + AssignedIPConfigState: make(map[string]cns.IPConfigurationStatus), PendingReleaseIPConfigState: make(map[string]cns.IPConfigurationStatus), AvailableIPIDStack: StringStack{}, } @@ -75,7 +75,7 @@ func (ipm *IPStateManager) AddIPConfigs(ipconfigs []cns.IPConfigurationStatus) { ipm.AvailableIPConfigState[ipconfig.ID] = ipconfig ipm.AvailableIPIDStack.Push(ipconfig.ID) case types.Assigned: - ipm.AllocatedIPConfigState[ipconfig.ID] = ipconfig + ipm.AssignedIPConfigState[ipconfig.ID] = ipconfig case types.PendingRelease: ipm.PendingReleaseIPConfigState[ipconfig.ID] = ipconfig } @@ -97,17 +97,21 @@ func (ipm *IPStateManager) ReserveIPConfig() (cns.IPConfigurationStatus, error) if err != nil { return cns.IPConfigurationStatus{}, err } - ipm.AllocatedIPConfigState[id] = ipm.AvailableIPConfigState[id] + ipc := ipm.AvailableIPConfigState[id] + ipc.State = types.Assigned + ipm.AssignedIPConfigState[id] = ipc delete(ipm.AvailableIPConfigState, id) - return ipm.AllocatedIPConfigState[id], nil + return ipm.AssignedIPConfigState[id], nil } func (ipm *IPStateManager) ReleaseIPConfig(ipconfigID string) (cns.IPConfigurationStatus, error) { ipm.Lock() defer ipm.Unlock() - ipm.AvailableIPConfigState[ipconfigID] = ipm.AllocatedIPConfigState[ipconfigID] + ipc := ipm.AssignedIPConfigState[ipconfigID] + ipc.State = types.Available + ipm.AvailableIPConfigState[ipconfigID] = ipc ipm.AvailableIPIDStack.Push(ipconfigID) - delete(ipm.AllocatedIPConfigState, ipconfigID) + delete(ipm.AssignedIPConfigState, ipconfigID) return ipm.AvailableIPConfigState[ipconfigID], nil } @@ -166,12 +170,12 @@ func NewHTTPServiceFake() *HTTPServiceFake { } } -func (fake *HTTPServiceFake) SetNumberOfAllocatedIPs(desiredAllocatedIPCount int) error { - currentAllocatedIPCount := len(fake.IPStateManager.AllocatedIPConfigState) - delta := (desiredAllocatedIPCount - currentAllocatedIPCount) +func (fake *HTTPServiceFake) SetNumberOfAssignedIPs(assign int) error { + 
currentAssigned := len(fake.IPStateManager.AssignedIPConfigState) + delta := (assign - currentAssigned) if delta > 0 { - // allocated IPs + // assign IPs for i := 0; i < delta; i++ { if _, err := fake.IPStateManager.ReserveIPConfig(); err != nil { return err @@ -179,10 +183,10 @@ func (fake *HTTPServiceFake) SetNumberOfAllocatedIPs(desiredAllocatedIPCount int } return nil } - // deallocate IPs + // unassign IPs delta *= -1 i := 0 - for id := range fake.IPStateManager.AllocatedIPConfigState { + for id := range fake.IPStateManager.AssignedIPConfigState { if i >= delta { break } @@ -223,7 +227,7 @@ func (fake *HTTPServiceFake) GetAvailableIPConfigs() []cns.IPConfigurationStatus func (fake *HTTPServiceFake) GetAssignedIPConfigs() []cns.IPConfigurationStatus { ipconfigs := []cns.IPConfigurationStatus{} - for _, ipconfig := range fake.IPStateManager.AllocatedIPConfigState { + for _, ipconfig := range fake.IPStateManager.AssignedIPConfigState { ipconfigs = append(ipconfigs, ipconfig) } return ipconfigs @@ -240,7 +244,7 @@ func (fake *HTTPServiceFake) GetPendingReleaseIPConfigs() []cns.IPConfigurationS // Return union of all state maps func (fake *HTTPServiceFake) GetPodIPConfigState() map[string]cns.IPConfigurationStatus { ipconfigs := make(map[string]cns.IPConfigurationStatus) - for key, val := range fake.IPStateManager.AllocatedIPConfigState { + for key, val := range fake.IPStateManager.AssignedIPConfigState { ipconfigs[key] = val } for key, val := range fake.IPStateManager.AvailableIPConfigState { diff --git a/cns/ipampool/monitor.go b/cns/ipampool/monitor.go index 0a7dfd5f4f..61ed1fae4a 100644 --- a/cns/ipampool/monitor.go +++ b/cns/ipampool/monitor.go @@ -35,7 +35,7 @@ type metaState struct { type Options struct { RefreshDelay time.Duration - MaxIPs int + MaxIPs int64 } type Monitor struct { @@ -145,7 +145,7 @@ func buildIPPoolState(ips map[string]cns.IPConfigurationStatus, spec v1alpha.Nod func (pm *Monitor) reconcile(ctx context.Context) error { allocatedIPs := 
pm.httpService.GetPodIPConfigState() state := buildIPPoolState(allocatedIPs, pm.spec) - logger.Printf("ipam-pool-monitor state %#v", state) + logger.Printf("ipam-pool-monitor state %+v", state) observeIPPoolState(state, pm.metastate) switch { @@ -157,12 +157,12 @@ func (pm *Monitor) reconcile(ctx context.Context) error { } logger.Printf("[ipam-pool-monitor] Increasing pool size...") - return pm.increasePoolSize(ctx) + return pm.increasePoolSize(ctx, state) // pod count is decreasing case state.unassigned >= pm.metastate.maxFreeCount: logger.Printf("[ipam-pool-monitor] Decreasing pool size...") - return pm.decreasePoolSize(ctx, state.pendingRelease) + return pm.decreasePoolSize(ctx, state) // CRD has reconciled CNS state, and target spec is now the same size as the state // free to remove the IP's from the CRD @@ -179,28 +179,27 @@ func (pm *Monitor) reconcile(ctx context.Context) error { return nil } -func (pm *Monitor) increasePoolSize(ctx context.Context) error { +func (pm *Monitor) increasePoolSize(ctx context.Context, state ipPoolState) error { tempNNCSpec := pm.createNNCSpecForCRD() // Query the max IP count previouslyRequestedIPCount := tempNNCSpec.RequestedIPCount batchSize := pm.metastate.batch - tempNNCSpec.RequestedIPCount += int64(batchSize) - if tempNNCSpec.RequestedIPCount > int64(pm.metastate.max) { + tempNNCSpec.RequestedIPCount += batchSize + if tempNNCSpec.RequestedIPCount > pm.metastate.max { // We don't want to ask for more ips than the max - logger.Printf("[ipam-pool-monitor] Requested IP count (%v) is over max limit (%v), requesting max limit instead.", tempNNCSpec.RequestedIPCount, pm.metastate.max) - tempNNCSpec.RequestedIPCount = int64(pm.metastate.max) + logger.Printf("[ipam-pool-monitor] Requested IP count (%d) is over max limit (%d), requesting max limit instead.", tempNNCSpec.RequestedIPCount, pm.metastate.max) + tempNNCSpec.RequestedIPCount = pm.metastate.max } // If the requested IP count is same as before, then don't do anything if 
tempNNCSpec.RequestedIPCount == previouslyRequestedIPCount { - logger.Printf("[ipam-pool-monitor] Previously requested IP count %v is same as updated IP count %v, doing nothing", previouslyRequestedIPCount, tempNNCSpec.RequestedIPCount) + logger.Printf("[ipam-pool-monitor] Previously requested IP count %d is same as updated IP count %d, doing nothing", previouslyRequestedIPCount, tempNNCSpec.RequestedIPCount) return nil } - logger.Printf("[ipam-pool-monitor] Increasing pool size, Current Pool Size: %v, Updated Requested IP Count: %v, Pods with IPs:%v, ToBeDeleted Count: %v", - len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAssignedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) + logger.Printf("[ipam-pool-monitor] Increasing pool size, pool %+v, spec %+v", state, tempNNCSpec) if _, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec); err != nil { // caller will retry to update the CRD again @@ -209,13 +208,13 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error { logger.Printf("[ipam-pool-monitor] Increasing pool size: UpdateCRDSpec succeeded for spec %+v", tempNNCSpec) // start an alloc timer - metric.StartPoolIncreaseTimer(int(batchSize)) + metric.StartPoolIncreaseTimer(batchSize) // save the updated state to cachedSpec pm.spec = tempNNCSpec return nil } -func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseIPCount int64) error { +func (pm *Monitor) decreasePoolSize(ctx context.Context, state ipPoolState) error { // mark n number of IP's as pending var newIpsMarkedAsPending bool var pendingIPAddresses map[string]cns.IPConfigurationStatus @@ -226,9 +225,9 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI batchSize := pm.metastate.batch modResult := previouslyRequestedIPCount % batchSize - logger.Printf("[ipam-pool-monitor] Previously RequestedIP Count %v", previouslyRequestedIPCount) - logger.Printf("[ipam-pool-monitor] Batch size : %v", batchSize) - 
logger.Printf("[ipam-pool-monitor] modResult of (previously requested IP count mod batch size) = %v", modResult) + logger.Printf("[ipam-pool-monitor] Previously RequestedIP Count %d", previouslyRequestedIPCount) + logger.Printf("[ipam-pool-monitor] Batch size : %d", batchSize) + logger.Printf("[ipam-pool-monitor] modResult of (previously requested IP count mod batch size) = %d", modResult) if modResult != 0 { // Example: previouscount = 25, batchsize = 10, 25 - 10 = 15, NOT a multiple of batchsize (10) @@ -241,10 +240,10 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI decreaseIPCountBy := previouslyRequestedIPCount - updatedRequestedIPCount - logger.Printf("[ipam-pool-monitor] updatedRequestedIPCount %v", updatedRequestedIPCount) + logger.Printf("[ipam-pool-monitor] updatedRequestedIPCount %d", updatedRequestedIPCount) - if pm.metastate.notInUseCount == 0 || pm.metastate.notInUseCount < existingPendingReleaseIPCount { - logger.Printf("[ipam-pool-monitor] Marking IPs as PendingRelease, ipsToBeReleasedCount %d", int(decreaseIPCountBy)) + if pm.metastate.notInUseCount == 0 || pm.metastate.notInUseCount < state.pendingRelease { + logger.Printf("[ipam-pool-monitor] Marking IPs as PendingRelease, ipsToBeReleasedCount %d", decreaseIPCountBy) var err error if pendingIPAddresses, err = pm.httpService.MarkIPAsPendingRelease(int(decreaseIPCountBy)); err != nil { return err @@ -264,8 +263,7 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI len(pendingIPAddresses), pm.metastate.notInUseCount) tempNNCSpec.RequestedIPCount -= int64(len(pendingIPAddresses)) - logger.Printf("[ipam-pool-monitor] Decreasing pool size, Current Pool Size: %v, Requested IP Count: %v, Pods with IPs: %v, ToBeDeleted Count: %v", - len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAssignedIPConfigs()), len(tempNNCSpec.IPsNotInUse)) + logger.Printf("[ipam-pool-monitor] Decreasing pool size, pool 
%+v, spec %+v", state, tempNNCSpec) _, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec) if err != nil { @@ -275,7 +273,7 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI logger.Printf("[ipam-pool-monitor] Decreasing pool size: UpdateCRDSpec succeeded for spec %+v", tempNNCSpec) // start a dealloc timer - metric.StartPoolDecreaseTimer(int(batchSize)) + metric.StartPoolDecreaseTimer(batchSize) // save the updated state to cachedSpec pm.spec = tempNNCSpec @@ -354,7 +352,7 @@ func (pm *Monitor) Update(nnc *v1alpha.NodeNetworkConfig) { // IP pool behavior for a while until the nnc reconciler corrects the state. func (pm *Monitor) clampScaler(scaler *v1alpha.Scaler) { if scaler.MaxIPCount < 1 { - scaler.MaxIPCount = int64(pm.opts.MaxIPs) + scaler.MaxIPCount = pm.opts.MaxIPs } if scaler.BatchSize < 1 { scaler.BatchSize = 1 diff --git a/cns/ipampool/monitor_test.go b/cns/ipampool/monitor_test.go index 9939140785..e2243e8e1d 100644 --- a/cns/ipampool/monitor_test.go +++ b/cns/ipampool/monitor_test.go @@ -33,7 +33,7 @@ func (d *directUpdatePoolMonitor) Update(nnc *v1alpha.NodeNetworkConfig) { } type state struct { - allocatedIPCount int + assignedIPCount int batchSize int ipConfigCount int maxIPCount int @@ -56,9 +56,14 @@ func initFakes(initState state) (*fakes.HTTPServiceFake, *fakes.RequestControlle fakerc := fakes.NewRequestControllerFake(fakecns, scalarUnits, subnetaddresspace, initState.ipConfigCount) poolmonitor := NewMonitor(fakecns, &fakeNodeNetworkConfigUpdater{fakerc.NNC}, &Options{RefreshDelay: 100 * time.Second}) - + poolmonitor.metastate = metaState{ + batch: int64(initState.batchSize), + max: int64(initState.maxIPCount), + } fakecns.PoolMonitor = &directUpdatePoolMonitor{m: poolmonitor} - _ = fakecns.SetNumberOfAllocatedIPs(initState.allocatedIPCount) + if err := fakecns.SetNumberOfAssignedIPs(initState.assignedIPCount); err != nil { + logger.Printf("%s", err) + } return fakecns, fakerc, poolmonitor } @@ -66,7 +71,7 @@ func 
initFakes(initState state) (*fakes.HTTPServiceFake, *fakes.RequestControlle func TestPoolSizeIncrease(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 8, + assignedIPCount: 8, ipConfigCount: 10, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -99,7 +104,7 @@ func TestPoolSizeIncrease(t *testing.T) { func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 8, + assignedIPCount: 8, ipConfigCount: 10, requestThresholdPercent: 30, releaseThresholdPercent: 150, @@ -112,8 +117,8 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { // When poolmonitor reconcile is called, trigger increase and cache goal state assert.NoError(t, poolmonitor.reconcile(context.Background())) - // increase number of allocated IPs in CNS, within allocatable size but still inside trigger threshold - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(9)) + // increase number of allocated IP's in CNS, within allocatable size but still inside trigger threshold + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(9)) // poolmonitor reconciles, but doesn't actually update the CRD, because there is already a pending update assert.NoError(t, poolmonitor.reconcile(context.Background())) @@ -138,7 +143,7 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { func TestPoolSizeIncreaseIdempotency(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 8, + assignedIPCount: 8, ipConfigCount: 10, requestThresholdPercent: 30, releaseThresholdPercent: 150, @@ -164,7 +169,7 @@ func TestPoolSizeIncreaseIdempotency(t *testing.T) { func TestPoolIncreasePastNodeLimit(t *testing.T) { initState := state{ batchSize: 16, - allocatedIPCount: 9, + assignedIPCount: 9, ipConfigCount: 16, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -184,7 +189,7 @@ func TestPoolIncreasePastNodeLimit(t *testing.T) { func 
TestPoolIncreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { initState := state{ batchSize: 50, - allocatedIPCount: 16, + assignedIPCount: 16, ipConfigCount: 16, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -205,7 +210,7 @@ func TestPoolDecrease(t *testing.T) { initState := state{ batchSize: 10, ipConfigCount: 20, - allocatedIPCount: 15, + assignedIPCount: 15, requestThresholdPercent: 50, releaseThresholdPercent: 150, maxIPCount: 30, @@ -217,8 +222,8 @@ func TestPoolDecrease(t *testing.T) { // Pool monitor does nothing, as the current number of IPs falls in the threshold assert.NoError(t, poolmonitor.reconcile(context.Background())) - // Decrease the number of allocated IPs down to 5. This should trigger a scale down - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(4)) + // Decrease the number of allocated IP's down to 5. This should trigger a scale down + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(4)) // Pool monitor will adjust the spec so the pool size will be 1 batch size smaller assert.NoError(t, poolmonitor.reconcile(context.Background())) @@ -238,7 +243,7 @@ func TestPoolDecrease(t *testing.T) { func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 5, + assignedIPCount: 5, ipConfigCount: 20, requestThresholdPercent: 30, releaseThresholdPercent: 100, @@ -258,7 +263,7 @@ func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { assert.Equal(t, int64(initState.ipConfigCount-initState.batchSize), poolmonitor.spec.RequestedIPCount) // Update pods with IP count, ensure pool monitor stays the same until request controller reconciles - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(6)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(6)) // Ensure the size of the requested spec is still the same assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.ipConfigCount-initState.batchSize) @@ -277,7 +282,7 @@ func 
TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { func TestDecreaseAndIncreaseToSameCount(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 7, + assignedIPCount: 7, ipConfigCount: 10, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -294,14 +299,14 @@ func TestDecreaseAndIncreaseToSameCount(t *testing.T) { assert.NoError(t, fakerc.Reconcile(true)) // Release all IPs - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(0)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(0)) assert.NoError(t, poolmonitor.reconcile(context.Background())) assert.Equal(t, int64(10), poolmonitor.spec.RequestedIPCount) assert.Len(t, poolmonitor.spec.IPsNotInUse, 10) // Increase it back to 20 // initial pool count is 10, set 5 of them to be allocated - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(7)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(7)) assert.NoError(t, poolmonitor.reconcile(context.Background())) assert.Equal(t, int64(20), poolmonitor.spec.RequestedIPCount) assert.Len(t, poolmonitor.spec.IPsNotInUse, 10) @@ -322,7 +327,7 @@ func TestDecreaseAndIncreaseToSameCount(t *testing.T) { func TestPoolSizeDecreaseToReallyLow(t *testing.T) { initState := state{ batchSize: 10, - allocatedIPCount: 23, + assignedIPCount: 23, ipConfigCount: 30, requestThresholdPercent: 30, releaseThresholdPercent: 100, @@ -336,7 +341,7 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // Now Drop the Allocated count to really low, say 3. 
This should trigger release in 2 batches - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(3)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(3)) // Pool monitor does nothing, as the current number of IPs falls in the threshold assert.NoError(t, poolmonitor.reconcile(context.Background())) @@ -364,7 +369,7 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) { func TestDecreaseAfterNodeLimitReached(t *testing.T) { initState := state{ batchSize: 16, - allocatedIPCount: 30, + assignedIPCount: 20, ipConfigCount: 30, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -376,7 +381,7 @@ func TestDecreaseAfterNodeLimitReached(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // Trigger a batch release - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(5)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(5)) assert.NoError(t, poolmonitor.reconcile(context.Background())) // Ensure poolmonitor asked for a multiple of batch size @@ -387,7 +392,7 @@ func TestDecreaseAfterNodeLimitReached(t *testing.T) { func TestPoolDecreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { initState := state{ batchSize: 31, - allocatedIPCount: 30, + assignedIPCount: 30, ipConfigCount: 30, requestThresholdPercent: 50, releaseThresholdPercent: 150, @@ -401,7 +406,7 @@ func TestPoolDecreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // Trigger a batch release - assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(1)) + assert.NoError(t, fakecns.SetNumberOfAssignedIPs(1)) assert.NoError(t, poolmonitor.reconcile(context.Background())) assert.Equal(t, int64(initState.maxIPCount), poolmonitor.spec.RequestedIPCount) } @@ -410,8 +415,8 @@ func TestCalculateIPs(t *testing.T) { tests := []struct { name string in v1alpha.Scaler - wantMinFree int - wantMaxFree int + wantMinFree int64 + wantMaxFree int64 }{ { name: "normal", diff --git a/cns/metric/pool.go b/cns/metric/pool.go index 
3c9af14d36..6adf08a6dc 100644 --- a/cns/metric/pool.go +++ b/cns/metric/pool.go @@ -32,8 +32,7 @@ import ( var incLatency prometheus.ObserverVec = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - //nolint:gomnd - Buckets: prometheus.ExponentialBuckets(0.05, 2, 15), // 50 ms to ~800 seconds + Buckets: prometheus.ExponentialBuckets(0.05, 2, 15), //nolint:gomnd // 50 ms to ~800 seconds Help: "IP pool size increase latency in seconds by batch size", Name: "ip_pool_inc_latency_seconds", }, @@ -42,8 +41,7 @@ var incLatency prometheus.ObserverVec = prometheus.NewHistogramVec( var decLatency prometheus.ObserverVec = prometheus.NewHistogramVec( prometheus.HistogramOpts{ - //nolint:gomnd - Buckets: prometheus.ExponentialBuckets(0.05, 2, 15), // 50 ms to ~800 seconds + Buckets: prometheus.ExponentialBuckets(0.05, 2, 15), //nolint:gomnd // 50 ms to ~800 seconds Help: "IP pool size decrease latency in seconds by batch size", Name: "ip_pool_dec_latency_seconds", }, @@ -52,7 +50,7 @@ var decLatency prometheus.ObserverVec = prometheus.NewHistogramVec( type scaleEvent struct { start time.Time - batch int + batch int64 } var ( @@ -62,7 +60,7 @@ var ( // StartPoolIncreaseTimer records the start of an IP allocation request. // If an IP allocation request is already in flight, this method noops. -func StartPoolIncreaseTimer(batch int) { +func StartPoolIncreaseTimer(batch int64) { e := scaleEvent{ start: time.Now(), batch: batch, @@ -75,7 +73,7 @@ func StartPoolIncreaseTimer(batch int) { // StartPoolDecreaseTimer records the start of an IP deallocation request. // If an IP deallocation request is already in flight, this method noops. 
-func StartPoolDecreaseTimer(batch int) { +func StartPoolDecreaseTimer(batch int64) { e := scaleEvent{ start: time.Now(), batch: batch, @@ -92,13 +90,13 @@ func StartPoolDecreaseTimer(batch int) { func ObserverPoolScaleLatency() { select { case e := <-incEvents: - incLatency.WithLabelValues(strconv.Itoa(e.batch)).Observe(time.Since(e.start).Seconds()) + incLatency.WithLabelValues(strconv.FormatInt(e.batch, 10)).Observe(time.Since(e.start).Seconds()) //nolint:gomnd // it's decimal default: } select { case e := <-decEvents: - decLatency.WithLabelValues(strconv.Itoa(e.batch)).Observe(time.Since(e.start).Seconds()) + decLatency.WithLabelValues(strconv.FormatInt(e.batch, 10)).Observe(time.Since(e.start).Seconds()) //nolint:gomnd // it's decimal default: } } From bacbbc5995880d4c40503924260a9e3eeebaa750 Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Mon, 29 Nov 2021 19:44:27 -0600 Subject: [PATCH 6/7] upcast usage of int to int64 in monitor fake tests Signed-off-by: Evan Baker --- cns/fakes/requestcontrollerfake.go | 10 +- cns/ipampool/metrics.go | 8 +- cns/ipampool/monitor.go | 4 +- cns/ipampool/monitor_test.go | 196 ++++++++++++++--------------- 4 files changed, 106 insertions(+), 112 deletions(-) diff --git a/cns/fakes/requestcontrollerfake.go b/cns/fakes/requestcontrollerfake.go index 7301941d6d..d18e9ae1a3 100644 --- a/cns/fakes/requestcontrollerfake.go +++ b/cns/fakes/requestcontrollerfake.go @@ -19,7 +19,7 @@ type RequestControllerFake struct { ip net.IP } -func NewRequestControllerFake(cnsService *HTTPServiceFake, scalar v1alpha.Scaler, subnetAddressSpace string, numberOfIPConfigs int) *RequestControllerFake { +func NewRequestControllerFake(cnsService *HTTPServiceFake, scalar v1alpha.Scaler, subnetAddressSpace string, numberOfIPConfigs int64) *RequestControllerFake { rc := &RequestControllerFake{ cnscli: cnsService, NNC: &v1alpha.NodeNetworkConfig{ @@ -38,14 +38,14 @@ func NewRequestControllerFake(cnsService *HTTPServiceFake, scalar v1alpha.Scaler rc.ip, _, _ = 
net.ParseCIDR(subnetAddressSpace) rc.CarveIPConfigsAndAddToStatusAndCNS(numberOfIPConfigs) - rc.NNC.Spec.RequestedIPCount = int64(numberOfIPConfigs) + rc.NNC.Spec.RequestedIPCount = numberOfIPConfigs return rc } -func (rc *RequestControllerFake) CarveIPConfigsAndAddToStatusAndCNS(numberOfIPConfigs int) []cns.IPConfigurationStatus { +func (rc *RequestControllerFake) CarveIPConfigsAndAddToStatusAndCNS(numberOfIPConfigs int64) []cns.IPConfigurationStatus { var cnsIPConfigs []cns.IPConfigurationStatus - for i := 0; i < numberOfIPConfigs; i++ { + for i := int64(0); i < numberOfIPConfigs; i++ { ipconfigCRD := v1alpha.IPAssignment{ Name: uuid.New().String(), @@ -85,7 +85,7 @@ func remove(slice []v1alpha.IPAssignment, s int) []v1alpha.IPAssignment { } func (rc *RequestControllerFake) Reconcile(removePendingReleaseIPs bool) error { - diff := int(rc.NNC.Spec.RequestedIPCount) - len(rc.cnscli.GetPodIPConfigState()) + diff := rc.NNC.Spec.RequestedIPCount - int64(len(rc.cnscli.GetPodIPConfigState())) if diff > 0 { // carve the difference of test IPs and add them to CNS, assume dnc has populated the CRD status diff --git a/cns/ipampool/metrics.go b/cns/ipampool/metrics.go index 9299e8f2e1..542ccc2f0c 100644 --- a/cns/ipampool/metrics.go +++ b/cns/ipampool/metrics.go @@ -57,7 +57,7 @@ var ( ipamRequestedUnassignedIPConfigCount = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "ipam_requested_unassigned_ips", - Help: "Requested IP count.", + Help: "Future unassigned IP count assuming the Requested IP count is honored.", }, ) ipamUnassignedIPCount = prometheus.NewGauge( @@ -66,12 +66,6 @@ var ( Help: "Unassigned IP count.", }, ) - ipamRequestedUnallocatedIPCount = prometheus.NewGauge( - prometheus.GaugeOpts{ - Name: "ipam_requested_unallocated_ips", - Help: "Unallocated IP count using total requested IPs.", - }, - ) ) func init() { diff --git a/cns/ipampool/monitor.go b/cns/ipampool/monitor.go index 61ed1fae4a..65f9865e7c 100644 --- a/cns/ipampool/monitor.go +++ 
b/cns/ipampool/monitor.go @@ -165,7 +165,7 @@ func (pm *Monitor) reconcile(ctx context.Context) error { return pm.decreasePoolSize(ctx, state) // CRD has reconciled CNS state, and target spec is now the same size as the state - // free to remove the IP's from the CRD + // free to remove the IPs from the CRD case int64(len(pm.spec.IPsNotInUse)) != state.pendingRelease: logger.Printf("[ipam-pool-monitor] Removing Pending Release IPs from CRD...") return pm.cleanPendingRelease(ctx) @@ -215,7 +215,7 @@ func (pm *Monitor) increasePoolSize(ctx context.Context, state ipPoolState) erro } func (pm *Monitor) decreasePoolSize(ctx context.Context, state ipPoolState) error { - // mark n number of IP's as pending + // mark n number of IPs as pending var newIpsMarkedAsPending bool var pendingIPAddresses map[string]cns.IPConfigurationStatus var updatedRequestedIPCount int64 diff --git a/cns/ipampool/monitor_test.go b/cns/ipampool/monitor_test.go index e2243e8e1d..0a6d315692 100644 --- a/cns/ipampool/monitor_test.go +++ b/cns/ipampool/monitor_test.go @@ -32,36 +32,36 @@ func (d *directUpdatePoolMonitor) Update(nnc *v1alpha.NodeNetworkConfig) { d.m.metastate.minFreeCount, d.m.metastate.maxFreeCount = CalculateMinFreeIPs(scaler), CalculateMaxFreeIPs(scaler) } -type state struct { - assignedIPCount int - batchSize int - ipConfigCount int - maxIPCount int - releaseThresholdPercent int - requestThresholdPercent int +type testState struct { + allocated int64 + assigned int + batch int64 + max int64 + releaseThresholdPercent int64 + requestThresholdPercent int64 } -func initFakes(initState state) (*fakes.HTTPServiceFake, *fakes.RequestControllerFake, *Monitor) { +func initFakes(state testState) (*fakes.HTTPServiceFake, *fakes.RequestControllerFake, *Monitor) { logger.InitLogger("testlogs", 0, 0, "./") scalarUnits := v1alpha.Scaler{ - BatchSize: int64(initState.batchSize), - RequestThresholdPercent: int64(initState.requestThresholdPercent), - ReleaseThresholdPercent: 
int64(initState.releaseThresholdPercent), - MaxIPCount: int64(initState.maxIPCount), + BatchSize: state.batch, + RequestThresholdPercent: state.requestThresholdPercent, + ReleaseThresholdPercent: state.releaseThresholdPercent, + MaxIPCount: state.max, } subnetaddresspace := "10.0.0.0/8" fakecns := fakes.NewHTTPServiceFake() - fakerc := fakes.NewRequestControllerFake(fakecns, scalarUnits, subnetaddresspace, initState.ipConfigCount) + fakerc := fakes.NewRequestControllerFake(fakecns, scalarUnits, subnetaddresspace, state.allocated) poolmonitor := NewMonitor(fakecns, &fakeNodeNetworkConfigUpdater{fakerc.NNC}, &Options{RefreshDelay: 100 * time.Second}) poolmonitor.metastate = metaState{ - batch: int64(initState.batchSize), - max: int64(initState.maxIPCount), + batch: state.batch, + max: state.max, } fakecns.PoolMonitor = &directUpdatePoolMonitor{m: poolmonitor} - if err := fakecns.SetNumberOfAssignedIPs(initState.assignedIPCount); err != nil { + if err := fakecns.SetNumberOfAssignedIPs(state.assigned); err != nil { logger.Printf("%s", err) } @@ -69,13 +69,13 @@ func initFakes(initState state) (*fakes.HTTPServiceFake, *fakes.RequestControlle } func TestPoolSizeIncrease(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 8, - ipConfigCount: 10, + initState := testState{ + batch: 10, + assigned: 8, + allocated: 10, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -85,7 +85,7 @@ func TestPoolSizeIncrease(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has reached quorum with cns - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) // request controller reconciles, carves new IPs from the test subnet and adds to CNS state assert.NoError(t, 
fakerc.Reconcile(true)) @@ -95,20 +95,20 @@ func TestPoolSizeIncrease(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has reached quorum with cns - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) // make sure IPConfig state size reflects the new pool size - assert.Len(t, fakecns.GetPodIPConfigState(), initState.ipConfigCount+(1*initState.batchSize)) + assert.Len(t, fakecns.GetPodIPConfigState(), int(initState.allocated+(1*initState.batch))) } func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 8, - ipConfigCount: 10, + initState := testState{ + batch: 10, + assigned: 8, + allocated: 10, requestThresholdPercent: 30, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -124,7 +124,7 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has reached quorum with cns - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) // request controller reconciles, carves new IPs from the test subnet and adds to CNS state assert.NoError(t, fakerc.Reconcile(true)) @@ -134,20 +134,20 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // make sure IPConfig state size reflects the new pool size - assert.Len(t, fakecns.GetPodIPConfigState(), initState.ipConfigCount+(1*initState.batchSize)) + assert.Len(t, fakecns.GetPodIPConfigState(), int(initState.allocated+(1*initState.batch))) 
// ensure pool monitor has reached quorum with cns - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) } func TestPoolSizeIncreaseIdempotency(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 8, - ipConfigCount: 10, + initState := testState{ + batch: 10, + assigned: 8, + allocated: 10, requestThresholdPercent: 30, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } _, fakerc, poolmonitor := initFakes(initState) @@ -157,23 +157,23 @@ func TestPoolSizeIncreaseIdempotency(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has increased batch size - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) // reconcile pool monitor a second time, then verify requested ip count is still the same assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor requested pool size is unchanged as request controller hasn't reconciled yet - assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated+(1*initState.batch), poolmonitor.spec.RequestedIPCount) } func TestPoolIncreasePastNodeLimit(t *testing.T) { - initState := state{ - batchSize: 16, - assignedIPCount: 9, - ipConfigCount: 16, + initState := testState{ + batch: 16, + assigned: 9, + allocated: 16, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } _, fakerc, poolmonitor := initFakes(initState) @@ -183,17 +183,17 @@ func TestPoolIncreasePastNodeLimit(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has only requested the max pod ip count - 
assert.Equal(t, int64(initState.maxIPCount), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.max, poolmonitor.spec.RequestedIPCount) } func TestPoolIncreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { - initState := state{ - batchSize: 50, - assignedIPCount: 16, - ipConfigCount: 16, + initState := testState{ + batch: 50, + assigned: 16, + allocated: 16, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } _, fakerc, poolmonitor := initFakes(initState) @@ -203,17 +203,17 @@ func TestPoolIncreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure pool monitor has only requested the max pod ip count - assert.Equal(t, int64(initState.maxIPCount), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.max, poolmonitor.spec.RequestedIPCount) } func TestPoolDecrease(t *testing.T) { - initState := state{ - batchSize: 10, - ipConfigCount: 20, - assignedIPCount: 15, + initState := testState{ + batch: 10, + allocated: 20, + assigned: 15, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -229,7 +229,7 @@ func TestPoolDecrease(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // ensure that the adjusted spec is smaller than the initial pool size - assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.ipConfigCount-initState.batchSize) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.allocated-initState.batch)) // reconcile the fake request controller assert.NoError(t, fakerc.Reconcile(true)) @@ -241,13 +241,13 @@ func TestPoolDecrease(t *testing.T) { } func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 5, - ipConfigCount: 20, + initState := testState{ + batch: 10, + assigned: 5, + allocated: 20, requestThresholdPercent: 30, 
releaseThresholdPercent: 100, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -257,19 +257,19 @@ func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // Ensure the size of the requested spec is still the same - assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.ipConfigCount-initState.batchSize) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.allocated-initState.batch)) // Ensure the request ipcount is now one batch size smaller than the initial IP count - assert.Equal(t, int64(initState.ipConfigCount-initState.batchSize), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated-initState.batch, poolmonitor.spec.RequestedIPCount) // Update pods with IP count, ensure pool monitor stays the same until request controller reconciles assert.NoError(t, fakecns.SetNumberOfAssignedIPs(6)) // Ensure the size of the requested spec is still the same - assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.ipConfigCount-initState.batchSize) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.allocated-initState.batch)) // Ensure the request ipcount is now one batch size smaller than the initial IP count - assert.Equal(t, int64(initState.ipConfigCount-initState.batchSize), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated-initState.batch, poolmonitor.spec.RequestedIPCount) assert.NoError(t, fakerc.Reconcile(true)) @@ -280,19 +280,19 @@ func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) { } func TestDecreaseAndIncreaseToSameCount(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 7, - ipConfigCount: 10, + initState := testState{ + batch: 10, + assigned: 7, + allocated: 10, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) assert.NoError(t, 
fakerc.Reconcile(true)) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(20), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, 20, poolmonitor.spec.RequestedIPCount) assert.Empty(t, poolmonitor.spec.IPsNotInUse) // Update the IPConfig state @@ -301,37 +301,37 @@ func TestDecreaseAndIncreaseToSameCount(t *testing.T) { // Release all IPs assert.NoError(t, fakecns.SetNumberOfAssignedIPs(0)) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(10), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, 10, poolmonitor.spec.RequestedIPCount) assert.Len(t, poolmonitor.spec.IPsNotInUse, 10) // Increase it back to 20 // initial pool count is 10, set 5 of them to be allocated assert.NoError(t, fakecns.SetNumberOfAssignedIPs(7)) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(20), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, 20, poolmonitor.spec.RequestedIPCount) assert.Len(t, poolmonitor.spec.IPsNotInUse, 10) // Update the IPConfig count and dont remove the pending IPs assert.NoError(t, fakerc.Reconcile(false)) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(20), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, 20, poolmonitor.spec.RequestedIPCount) assert.Len(t, poolmonitor.spec.IPsNotInUse, 10) assert.NoError(t, fakerc.Reconcile(true)) assert.NoError(t, poolmonitor.reconcile(context.Background())) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(20), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, 20, poolmonitor.spec.RequestedIPCount) assert.Empty(t, poolmonitor.spec.IPsNotInUse) } func TestPoolSizeDecreaseToReallyLow(t *testing.T) { - initState := state{ - batchSize: 10, - assignedIPCount: 23, - ipConfigCount: 30, + initState := testState{ + batch: 10, + assigned: 23, + allocated: 30, requestThresholdPercent: 30, releaseThresholdPercent: 
100, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -347,19 +347,19 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) { assert.NoError(t, poolmonitor.reconcile(context.Background())) // Ensure the size of the requested spec is still the same - assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.batchSize) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.batch)) // Ensure the request ipcount is now one batch size smaller than the initial IP count - assert.Equal(t, int64(initState.ipConfigCount-initState.batchSize), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated-initState.batch, poolmonitor.spec.RequestedIPCount) // Reconcile again, it should release the second batch assert.NoError(t, poolmonitor.reconcile(context.Background())) // Ensure the size of the requested spec is still the same - assert.Len(t, poolmonitor.spec.IPsNotInUse, initState.batchSize*2) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.batch*2)) // Ensure the request ipcount is now one batch size smaller than the initial IP count - assert.Equal(t, int64(initState.ipConfigCount-(initState.batchSize*2)), poolmonitor.spec.RequestedIPCount) + assert.Equal(t, initState.allocated-(initState.batch*2), poolmonitor.spec.RequestedIPCount) assert.NoError(t, fakerc.Reconcile(true)) assert.NoError(t, poolmonitor.reconcile(context.Background())) @@ -367,13 +367,13 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) { } func TestDecreaseAfterNodeLimitReached(t *testing.T) { - initState := state{ - batchSize: 16, - assignedIPCount: 20, - ipConfigCount: 30, + initState := testState{ + batch: 16, + assigned: 20, + allocated: 30, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) assert.NoError(t, fakerc.Reconcile(true)) @@ -385,18 +385,18 @@ func TestDecreaseAfterNodeLimitReached(t *testing.T) { assert.NoError(t, 
poolmonitor.reconcile(context.Background())) // Ensure poolmonitor asked for a multiple of batch size - assert.Equal(t, int64(16), poolmonitor.spec.RequestedIPCount) - assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.maxIPCount%initState.batchSize)) + assert.EqualValues(t, 16, poolmonitor.spec.RequestedIPCount) + assert.Len(t, poolmonitor.spec.IPsNotInUse, int(initState.max%initState.batch)) } func TestPoolDecreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { - initState := state{ - batchSize: 31, - assignedIPCount: 30, - ipConfigCount: 30, + initState := testState{ + batch: 31, + assigned: 30, + allocated: 30, requestThresholdPercent: 50, releaseThresholdPercent: 150, - maxIPCount: 30, + max: 30, } fakecns, fakerc, poolmonitor := initFakes(initState) @@ -408,7 +408,7 @@ func TestPoolDecreaseBatchSizeGreaterThanMaxPodIPCount(t *testing.T) { // Trigger a batch release assert.NoError(t, fakecns.SetNumberOfAssignedIPs(1)) assert.NoError(t, poolmonitor.reconcile(context.Background())) - assert.Equal(t, int64(initState.maxIPCount), poolmonitor.spec.RequestedIPCount) + assert.EqualValues(t, initState.max, poolmonitor.spec.RequestedIPCount) } func TestCalculateIPs(t *testing.T) { From 7cc12973bed569b889e79f21304684f650417f8e Mon Sep 17 00:00:00 2001 From: Evan Baker Date: Tue, 30 Nov 2021 14:53:37 -0600 Subject: [PATCH 7/7] update remaining usages of allocate to assign Signed-off-by: Evan Baker --- cns/client/client.go | 2 +- cns/client/client_test.go | 4 +- cns/filter/ipam.go | 2 +- cns/filter/ipam_test.go | 2 +- cns/ipampool/metrics.go | 2 +- cns/ipampool/monitor_test.go | 6 +-- cns/restserver/internalapi.go | 8 ++-- cns/restserver/internalapi_test.go | 46 +++++++++---------- cns/restserver/ipam.go | 54 +++++++++++------------ cns/restserver/ipam_test.go | 56 ++++++++++++------------ cns/restserver/metrics.go | 8 ++-- cns/restserver/restserver.go | 4 +- cns/restserver/util.go | 8 ++-- cns/service/main.go | 2 +- cns/singletenantcontroller/metrics.go | 4 +- 
cns/singletenantcontroller/reconciler.go | 2 +- cns/types/ipconfig.go | 10 ++--- 17 files changed, 109 insertions(+), 111 deletions(-) diff --git a/cns/client/client.go b/cns/client/client.go index a6ce8e6efc..bdfd7df352 100644 --- a/cns/client/client.go +++ b/cns/client/client.go @@ -303,7 +303,7 @@ func (c *Client) ReleaseIPAddress(ctx context.Context, ipconfig cns.IPConfigRequ } // GetIPAddressesMatchingStates takes a variadic number of string parameters, to get all IP Addresses matching a number of states -// usage GetIPAddressesWithStates(types.Available, cns.Allocated) +// usage GetIPAddressesWithStates(ctx, types.Available...) func (c *Client) GetIPAddressesMatchingStates(ctx context.Context, stateFilter ...types.IPState) ([]cns.IPConfigurationStatus, error) { if len(stateFilter) == 0 { return nil, nil diff --git a/cns/client/client_test.go b/cns/client/client_test.go index 45bcfee1bc..3ebec08b7f 100644 --- a/cns/client/client_test.go +++ b/cns/client/client_test.go @@ -283,9 +283,9 @@ func TestCNSClientRequestAndRelease(t *testing.T) { assert.Equal(t, desired, resultIPnet, "Desired result not matching actual result") - // checking for allocated IP address and pod context printing before ReleaseIPAddress is called + // checking for assigned IP address and pod context printing before ReleaseIPAddress is called ipaddresses, err := cnsClient.GetIPAddressesMatchingStates(context.TODO(), types.Assigned) - assert.NoError(t, err, "Get allocated IP addresses failed") + assert.NoError(t, err, "Get assigned IP addresses failed") assert.Len(t, ipaddresses, 1, "Number of available IP addresses expected to be 1") assert.Equal(t, desiredIpAddress, ipaddresses[0].IPAddress, "Available IP address does not match expected, address state") diff --git a/cns/filter/ipam.go b/cns/filter/ipam.go index ecee37f72e..877e1ff19f 100644 --- a/cns/filter/ipam.go +++ b/cns/filter/ipam.go @@ -8,7 +8,7 @@ import ( type IPConfigStatePredicate func(ipconfig cns.IPConfigurationStatus) bool var 
( - // StateAssigned is a preset filter for cns.Allocated. + // StateAssigned is a preset filter for types.Assigned. StateAssigned = ipConfigStatePredicate(types.Assigned) // StateAvailable is a preset filter for types.Available. StateAvailable = ipConfigStatePredicate(types.Available) diff --git a/cns/filter/ipam_test.go b/cns/filter/ipam_test.go index b553a7ae32..48d5e7fe7e 100644 --- a/cns/filter/ipam_test.go +++ b/cns/filter/ipam_test.go @@ -16,7 +16,7 @@ var testStatuses = []struct { { State: types.Assigned, Status: cns.IPConfigurationStatus{ - ID: "allocated", + ID: "assigned", State: types.Assigned, }, }, diff --git a/cns/ipampool/metrics.go b/cns/ipampool/metrics.go index 542ccc2f0c..0f1e0e5c27 100644 --- a/cns/ipampool/metrics.go +++ b/cns/ipampool/metrics.go @@ -9,7 +9,7 @@ var ( ipamAllocatedIPCount = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "ipam_allocated_ips", - Help: "CNS allocated IP pool size.", + Help: "CNS's allocated IP pool size.", }, ) ipamAssignedIPCount = prometheus.NewGauge( diff --git a/cns/ipampool/monitor_test.go b/cns/ipampool/monitor_test.go index 0a6d315692..962f903812 100644 --- a/cns/ipampool/monitor_test.go +++ b/cns/ipampool/monitor_test.go @@ -117,7 +117,7 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) { // When poolmonitor reconcile is called, trigger increase and cache goal state assert.NoError(t, poolmonitor.reconcile(context.Background())) - // increase number of allocated IP's in CNS, within allocatable size but still inside trigger threshold + // increase number of allocated IPs in CNS, within allocatable size but still inside trigger threshold assert.NoError(t, fakecns.SetNumberOfAssignedIPs(9)) // poolmonitor reconciles, but doesn't actually update the CRD, because there is already a pending update @@ -222,7 +222,7 @@ func TestPoolDecrease(t *testing.T) { // Pool monitor does nothing, as the current number of IPs falls in the threshold assert.NoError(t, 
poolmonitor.reconcile(context.Background())) - // Decrease the number of allocated IP's down to 5. This should trigger a scale down + // Decrease the number of allocated IPs down to 5. This should trigger a scale down assert.NoError(t, fakecns.SetNumberOfAssignedIPs(4)) // Pool monitor will adjust the spec so the pool size will be 1 batch size smaller @@ -340,7 +340,7 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) { // Pool monitor does nothing, as the current number of IPs falls in the threshold assert.NoError(t, poolmonitor.reconcile(context.Background())) - // Now Drop the Allocated count to really low, say 3. This should trigger release in 2 batches + // Now Drop the Assigned count to really low, say 3. This should trigger release in 2 batches assert.NoError(t, fakecns.SetNumberOfAssignedIPs(3)) // Pool monitor does nothing, as the current number of IPs falls in the threshold diff --git a/cns/restserver/internalapi.go b/cns/restserver/internalapi.go index ceb9a6d8d6..692241312f 100644 --- a/cns/restserver/internalapi.go +++ b/cns/restserver/internalapi.go @@ -222,17 +222,17 @@ func (service *HTTPRestService) ReconcileNCState( return types.Success } - // If the NC was created successfully, then reconcile the allocated pod state + // If the NC was created successfully, then reconcile the assigned pod state returnCode := service.CreateOrUpdateNetworkContainerInternal(ncRequest) if returnCode != types.Success { return returnCode } service.IPAMPoolMonitor.Update(nnc) - // now parse the secondaryIP list, if it exists in PodInfo list, then allocate that ip + // now parse the secondaryIP list, if it exists in PodInfo list, then assign that ip. for _, secIpConfig := range ncRequest.SecondaryIPConfigs { if podInfo, exists := podInfoByIP[secIpConfig.IPAddress]; exists { - logger.Printf("SecondaryIP %+v is allocated to Pod. %+v, ncId: %s", secIpConfig, podInfo, ncRequest.NetworkContainerid) + logger.Printf("SecondaryIP %+v is assigned to Pod. 
%+v, ncId: %s", secIpConfig, podInfo, ncRequest.NetworkContainerid) jsonContext, err := podInfo.OrchestratorContext() if err != nil { @@ -252,7 +252,7 @@ func (service *HTTPRestService) ReconcileNCState( return types.FailedToAllocateIPConfig } } else { - logger.Printf("SecondaryIP %+v is not allocated. ncId: %s", secIpConfig, ncRequest.NetworkContainerid) + logger.Printf("SecondaryIP %+v is not assigned. ncId: %s", secIpConfig, ncRequest.NetworkContainerid) } } diff --git a/cns/restserver/internalapi_test.go b/cns/restserver/internalapi_test.go index 5e39cd5852..8331853977 100644 --- a/cns/restserver/internalapi_test.go +++ b/cns/restserver/internalapi_test.go @@ -198,8 +198,8 @@ func TestReconcileNCWithEmptyState(t *testing.T) { setOrchestratorTypeInternal(cns.KubernetesCRD) expectedNcCount := len(svc.state.ContainerStatus) - expectedAllocatedPods := make(map[string]cns.PodInfo) - returnCode := svc.ReconcileNCState(nil, expectedAllocatedPods, &v1alpha.NodeNetworkConfig{ + expectedAssignedPods := make(map[string]cns.PodInfo) + returnCode := svc.ReconcileNCState(nil, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -215,7 +215,7 @@ func TestReconcileNCWithEmptyState(t *testing.T) { t.Errorf("Unexpected failure on reconcile with no state %d", returnCode) } - validateNCStateAfterReconcile(t, nil, expectedNcCount, expectedAllocatedPods) + validateNCStateAfterReconcile(t, nil, expectedNcCount, expectedAssignedPods) } func TestReconcileNCWithExistingState(t *testing.T) { @@ -235,13 +235,13 @@ func TestReconcileNCWithExistingState(t *testing.T) { } req := generateNetworkContainerRequest(secondaryIPConfigs, "reconcileNc1", "-1") - expectedAllocatedPods := map[string]cns.PodInfo{ + expectedAssignedPods := map[string]cns.PodInfo{ "10.0.0.6": cns.NewPodInfo("", "", "reconcilePod1", "PodNS1"), "10.0.0.7": cns.NewPodInfo("", "", "reconcilePod2", "PodNS1"), } expectedNcCount := 
len(svc.state.ContainerStatus) - returnCode := svc.ReconcileNCState(req, expectedAllocatedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileNCState(req, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -257,7 +257,7 @@ func TestReconcileNCWithExistingState(t *testing.T) { t.Errorf("Unexpected failure on reconcile with no state %d", returnCode) } - validateNCStateAfterReconcile(t, req, expectedNcCount+1, expectedAllocatedPods) + validateNCStateAfterReconcile(t, req, expectedNcCount+1, expectedAssignedPods) } func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { @@ -279,13 +279,13 @@ func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { } req := generateNetworkContainerRequest(secondaryIPConfigs, "reconcileNc1", "-1") - expectedAllocatedPods := map[string]cns.PodInfo{ + expectedAssignedPods := map[string]cns.PodInfo{ "10.0.0.6": cns.NewPodInfo("abcdef", "recon1-eth0", "reconcilePod1", "PodNS1"), "10.0.0.7": cns.NewPodInfo("abcxyz", "recon2-eth0", "reconcilePod2", "PodNS1"), } expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileNCState(req, expectedAllocatedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileNCState(req, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -301,7 +301,7 @@ func TestReconcileNCWithExistingStateFromInterfaceID(t *testing.T) { t.Errorf("Unexpected failure on reconcile with no state %d", returnCode) } - validateNCStateAfterReconcile(t, req, expectedNcCount+1, expectedAllocatedPods) + validateNCStateAfterReconcile(t, req, expectedNcCount+1, expectedAssignedPods) } func TestReconcileNCWithSystemPods(t *testing.T) { @@ -321,14 +321,14 @@ func TestReconcileNCWithSystemPods(t *testing.T) { } req := generateNetworkContainerRequest(secondaryIPConfigs, uuid.New().String(), "-1") - 
expectedAllocatedPods := make(map[string]cns.PodInfo) - expectedAllocatedPods["10.0.0.6"] = cns.NewPodInfo("", "", "customerpod1", "PodNS1") + expectedAssignedPods := make(map[string]cns.PodInfo) + expectedAssignedPods["10.0.0.6"] = cns.NewPodInfo("", "", "customerpod1", "PodNS1") // Allocate non-vnet IP for system pod - expectedAllocatedPods["192.168.0.1"] = cns.NewPodInfo("", "", "systempod", "kube-system") + expectedAssignedPods["192.168.0.1"] = cns.NewPodInfo("", "", "systempod", "kube-system") expectedNcCount := len(svc.state.ContainerStatus) - returnCode := svc.ReconcileNCState(req, expectedAllocatedPods, &v1alpha.NodeNetworkConfig{ + returnCode := svc.ReconcileNCState(req, expectedAssignedPods, &v1alpha.NodeNetworkConfig{ Status: v1alpha.NodeNetworkConfigStatus{ Scaler: v1alpha.Scaler{ BatchSize: batchSize, @@ -344,8 +344,8 @@ func TestReconcileNCWithSystemPods(t *testing.T) { t.Errorf("Unexpected failure on reconcile with no state %d", returnCode) } - delete(expectedAllocatedPods, "192.168.0.1") - validateNCStateAfterReconcile(t, req, expectedNcCount, expectedAllocatedPods) + delete(expectedAssignedPods, "192.168.0.1") + validateNCStateAfterReconcile(t, req, expectedNcCount, expectedAssignedPods) } func setOrchestratorTypeInternal(orchestratorType string) { @@ -484,10 +484,10 @@ func validateNetworkRequest(t *testing.T, req cns.CreateNetworkContainerRequest) if ipStatus.PodInfo != nil { if _, exists := svc.PodIPIDByPodInterfaceKey[ipStatus.PodInfo.Key()]; exists { if ipStatus.State != types.Assigned { - t.Fatalf("IPId: %s State is not Allocated, ipStatus: %+v", ipid, ipStatus) + t.Fatalf("IPId: %s State is not Assigned, ipStatus: %+v", ipid, ipStatus) } } else { - t.Fatalf("Failed to find podContext for allocated ip: %+v, podinfo :%+v", ipStatus, ipStatus.PodInfo) + t.Fatalf("Failed to find podContext for assigned ip: %+v, podinfo :%+v", ipStatus, ipStatus.PodInfo) } } else if ipStatus.State != expectedIPStatus { // Todo: Validate for pendingRelease as well 
@@ -534,7 +534,7 @@ func generateNetworkContainerRequest(secondaryIps map[string]cns.SecondaryIPConf return &req } -func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkContainerRequest, expectedNcCount int, expectedAllocatedPods map[string]cns.PodInfo) { +func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkContainerRequest, expectedNcCount int, expectedAssignedPods map[string]cns.PodInfo) { if ncRequest == nil { // check svc ContainerStatus will be empty if len(svc.state.ContainerStatus) != expectedNcCount { @@ -544,16 +544,16 @@ func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkCon validateNetworkRequest(t, *ncRequest) } - if len(expectedAllocatedPods) != len(svc.PodIPIDByPodInterfaceKey) { - t.Fatalf("Unexpected allocated pods, actual: %d, expected: %d", len(svc.PodIPIDByPodInterfaceKey), len(expectedAllocatedPods)) + if len(expectedAssignedPods) != len(svc.PodIPIDByPodInterfaceKey) { + t.Fatalf("Unexpected assigned pods, actual: %d, expected: %d", len(svc.PodIPIDByPodInterfaceKey), len(expectedAssignedPods)) } - for ipaddress, podInfo := range expectedAllocatedPods { + for ipaddress, podInfo := range expectedAssignedPods { ipId := svc.PodIPIDByPodInterfaceKey[podInfo.Key()] ipConfigstate := svc.PodIPConfigState[ipId] if ipConfigstate.State != types.Assigned { - t.Fatalf("IpAddress %s is not marked as allocated for Pod: %+v, ipState: %+v", ipaddress, podInfo, ipConfigstate) + t.Fatalf("IpAddress %s is not marked as assigned to Pod: %+v, ipState: %+v", ipaddress, podInfo, ipConfigstate) } // Validate if IPAddress matches @@ -576,7 +576,7 @@ func validateNCStateAfterReconcile(t *testing.T, ncRequest *cns.CreateNetworkCon // validate rest of Secondary IPs in Available state if ncRequest != nil { for secIpId, secIpConfig := range ncRequest.SecondaryIPConfigs { - if _, exists := expectedAllocatedPods[secIpConfig.IPAddress]; exists { + if _, exists := 
expectedAssignedPods[secIpConfig.IPAddress]; exists { continue } diff --git a/cns/restserver/ipam.go b/cns/restserver/ipam.go index 5772960c8a..9146830077 100644 --- a/cns/restserver/ipam.go +++ b/cns/restserver/ipam.go @@ -40,7 +40,7 @@ func (service *HTTPRestService) requestIPConfigHandler(w http.ResponseWriter, r } // record a pod requesting an IP - service.podsPendingIPAllocation.Push(podInfo.Key()) + service.podsPendingIPAssignment.Push(podInfo.Key()) podIPInfo, err := requestIPConfigHelper(service, ipconfigRequest) if err != nil { @@ -56,11 +56,11 @@ func (service *HTTPRestService) requestIPConfigHandler(w http.ResponseWriter, r return } - // record a pod allocated an IP + // record a pod assigned an IP defer func() { - // observe allocation wait time - if since := service.podsPendingIPAllocation.Pop(podInfo.Key()); since > 0 { - ipAllocationLatency.Observe(float64(since)) + // observe IP assignment wait time + if since := service.podsPendingIPAssignment.Pop(podInfo.Key()); since > 0 { + ipAssignmentLatency.Observe(float64(since)) } }() reserveResp := &cns.IPConfigResponse{ @@ -279,9 +279,8 @@ func (service *HTTPRestService) GetPendingReleaseIPConfigs() []cns.IPConfigurati return filter.MatchAnyIPConfigState(service.PodIPConfigState, filter.StatePendingRelease) } -// SetIPConfigAsAllocated takes a lock of the service, and sets the ipconfig in the CNS state as allocated. -// Does not take a lock. -func (service *HTTPRestService) setIPConfigAsAllocated(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) error { +// assignIPConfig assigns the ipconfig to the passed Pod, sets the state as Assigned, does not take a lock.
+func (service *HTTPRestService) assignIPConfig(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) error { //nolint:gocritic // ignore hugeparam ipconfig, err := service.updateIPConfigState(ipconfig.ID, types.Assigned, podInfo) if err != nil { return err @@ -291,8 +290,8 @@ func (service *HTTPRestService) setIPConfigAsAllocated(ipconfig cns.IPConfigurat return nil } -// SetIPConfigAsAllocated and sets the ipconfig in the CNS state as allocated, does not take a lock -func (service *HTTPRestService) setIPConfigAsAvailable(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { +// unassignIPConfig unassigns the ipconfig from the passed Pod, sets the state as Available, does not take a lock. +func (service *HTTPRestService) unassignIPConfig(ipconfig cns.IPConfigurationStatus, podInfo cns.PodInfo) (cns.IPConfigurationStatus, error) { //nolint:gocritic // ignore hugeparam ipconfig, err := service.updateIPConfigState(ipconfig.ID, types.Available, nil) if err != nil { return cns.IPConfigurationStatus{}, err @@ -304,7 +303,6 @@ func (service *HTTPRestService) setIPConfigAsAvailable(ipconfig cns.IPConfigurat return ipconfig, nil } -////SetIPConfigAsAllocated takes a lock of the service, and sets the ipconfig in the CNS stateas Available // Todo - CNI should also pass the IPAddress which needs to be released to validate if that is the right IP allcoated // in the first place. func (service *HTTPRestService) releaseIPConfig(podInfo cns.PodInfo) error { @@ -315,7 +313,7 @@ func (service *HTTPRestService) releaseIPConfig(podInfo cns.PodInfo) error { if ipID != "" { if ipconfig, isExist := service.PodIPConfigState[ipID]; isExist { logger.Printf("[releaseIPConfig] Releasing IP %+v for pod %+v", ipconfig.IPAddress, podInfo) - _, err := service.setIPConfigAsAvailable(ipconfig, podInfo) + _, err := service.unassignIPConfig(ipconfig, podInfo) if err != nil { return fmt.Errorf("[releaseIPConfig] failed to mark IPConfig [%+v] as Available. 
err: %v", ipconfig, err) } @@ -341,7 +339,7 @@ func (service *HTTPRestService) MarkExistingIPsAsPending(pendingIPIDs []string) for _, id := range pendingIPIDs { if ipconfig, exists := service.PodIPConfigState[id]; exists { if ipconfig.State == types.Assigned { - return fmt.Errorf("Failed to mark IP [%v] as pending, currently allocated", id) + return errors.Errorf("Failed to mark IP [%v] as pending, currently assigned", id) } logger.Printf("[MarkExistingIPsAsPending]: Marking IP [%+v] to PendingRelease", ipconfig) @@ -377,30 +375,30 @@ func (service *HTTPRestService) GetExistingIPConfig(podInfo cns.PodInfo) (cns.Po return podIpInfo, isExist, nil } -func (service *HTTPRestService) AllocateDesiredIPConfig(podInfo cns.PodInfo, desiredIpAddress string) (cns.PodIpInfo, error) { +func (service *HTTPRestService) AssignDesiredIPConfig(podInfo cns.PodInfo, desiredIPAddress string) (cns.PodIpInfo, error) { var podIpInfo cns.PodIpInfo service.Lock() defer service.Unlock() for _, ipConfig := range service.PodIPConfigState { - if ipConfig.IPAddress == desiredIpAddress { + if ipConfig.IPAddress == desiredIPAddress { switch ipConfig.State { //nolint:exhaustive // ignoring PendingRelease case intentionally case types.Assigned: - // This IP has already been allocated, if it is allocated to same pod, then return the same + // This IP has already been assigned, if it is assigned to same pod, then return the same // IPconfiguration if ipConfig.PodInfo.Key() == podInfo.Key() { - logger.Printf("[AllocateDesiredIPConfig]: IP Config [%+v] is already allocated to this Pod [%+v]", ipConfig, podInfo) + logger.Printf("[AssignDesiredIPConfig]: IP Config [%+v] is already assigned to this Pod [%+v]", ipConfig, podInfo) } else { - return podIpInfo, fmt.Errorf("[AllocateDesiredIPConfig] Desired IP is already allocated %+v, requested for pod %+v", ipConfig, podInfo) + return podIpInfo, errors.Errorf("[AssignDesiredIPConfig] Desired IP is already assigned %+v, requested for pod %+v", ipConfig, 
podInfo) } case types.Available, types.PendingProgramming: // This race can happen during restart, where CNS state is lost and thus we have lost the NC programmed version - // As part of reconcile, we mark IPs as Allocated which are already allocated to PODs (listed from APIServer) - if err := service.setIPConfigAsAllocated(ipConfig, podInfo); err != nil { + // As part of reconcile, we mark IPs as Assigned which are already assigned to Pods (listed from APIServer) + if err := service.assignIPConfig(ipConfig, podInfo); err != nil { return podIpInfo, err } default: - return podIpInfo, fmt.Errorf("[AllocateDesiredIPConfig] Desired IP is not available %+v", ipConfig) + return podIpInfo, errors.Errorf("[AssignDesiredIPConfig] Desired IP is not available %+v", ipConfig) } err := service.populateIPConfigInfoUntransacted(ipConfig, &podIpInfo) return podIpInfo, err @@ -409,13 +407,13 @@ func (service *HTTPRestService) AllocateDesiredIPConfig(podInfo cns.PodInfo, des return podIpInfo, fmt.Errorf("Requested IP not found in pool") } -func (service *HTTPRestService) AllocateAnyAvailableIPConfig(podInfo cns.PodInfo) (cns.PodIpInfo, error) { +func (service *HTTPRestService) AssignAnyAvailableIPConfig(podInfo cns.PodInfo) (cns.PodIpInfo, error) { service.Lock() defer service.Unlock() for _, ipState := range service.PodIPConfigState { if ipState.State == types.Available { - if err := service.setIPConfigAsAllocated(ipState, podInfo); err != nil { + if err := service.assignIPConfig(ipState, podInfo); err != nil { return cns.PodIpInfo{}, err } @@ -428,12 +426,12 @@ func (service *HTTPRestService) AllocateAnyAvailableIPConfig(podInfo cns.PodInfo } } //nolint:goerr113 - return cns.PodIpInfo{}, fmt.Errorf("no more free IPs available, waiting on Azure CNS to allocated more") + return cns.PodIpInfo{}, fmt.Errorf("no IPs available, waiting on Azure CNS to allocate more") } -// If IPConfig is already allocated for pod, it returns that else it returns one of the available ipconfigs.
+// If IPConfig is already assigned to pod, it returns that else it returns one of the available ipconfigs. func requestIPConfigHelper(service *HTTPRestService, req cns.IPConfigRequest) (cns.PodIpInfo, error) { - // check if ipconfig already allocated for this pod and return if exists or error + // check if ipconfig already assigned to this pod and return if exists or error // if error, ipstate is nil, if exists, ipstate is not nil and error is nil podInfo, err := cns.NewPodInfoFromIPConfigRequest(req) if err != nil { @@ -446,9 +444,9 @@ func requestIPConfigHelper(service *HTTPRestService, req cns.IPConfigRequest) (c // return desired IPConfig if req.DesiredIPAddress != "" { - return service.AllocateDesiredIPConfig(podInfo, req.DesiredIPAddress) + return service.AssignDesiredIPConfig(podInfo, req.DesiredIPAddress) } // return any free IPConfig - return service.AllocateAnyAvailableIPConfig(podInfo) + return service.AssignAnyAvailableIPConfig(podInfo) } diff --git a/cns/restserver/ipam_test.go b/cns/restserver/ipam_test.go index 0d58de5617..886a4f4889 100644 --- a/cns/restserver/ipam_test.go +++ b/cns/restserver/ipam_test.go @@ -186,7 +186,7 @@ func TestIPAMGetAvailableIPConfig(t *testing.T) { func TestIPAMGetNextAvailableIPConfig(t *testing.T) { svc := getTestService() - // Add already allocated pod ip to state + // Add already assigned pod ip to state svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = testPod1GUID state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, types.Available, 0) @@ -211,7 +211,7 @@ func TestIPAMGetNextAvailableIPConfig(t *testing.T) { if err != nil { t.Fatalf("Expected IP retrieval to be nil: %+v", err) } - // want second available Pod IP State as first has been allocated + // want second available Pod IP State as first has been assigned desiredState, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID,
testNCID, types.Assigned, 24, 0, testPod2Info) if reflect.DeepEqual(desiredState, actualstate) != true { @@ -219,10 +219,10 @@ func TestIPAMGetNextAvailableIPConfig(t *testing.T) { } } -func TestIPAMGetAlreadyAllocatedIPConfigForSamePod(t *testing.T) { +func TestIPAMGetAlreadyAssignedIPConfigForSamePod(t *testing.T) { svc := getTestService() - // Add Allocated Pod IP to state + // Add Assigned Pod IP to state testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, @@ -314,10 +314,10 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T) { } } -func TestIPAMFailToGetDesiredIPConfigWithAlreadyAllocatedSpecfiedIP(t *testing.T) { +func TestIPAMFailToGetDesiredIPConfigWithAlreadyAssignedSpecfiedIP(t *testing.T) { svc := getTestService() - // set state as already allocated + // set state as already assigned testState, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ testState.ID: testState, @@ -327,7 +327,7 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAllocatedSpecfiedIP(t *testing.T t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - // request the already allocated ip with a new context + // request the already assigned ip with a new context req := cns.IPConfigRequest{ PodInterfaceID: testPod2Info.InterfaceID(), InfraContainerID: testPod2Info.InfraContainerID(), @@ -338,14 +338,14 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAllocatedSpecfiedIP(t *testing.T _, err = requestIpAddressAndGetState(t, req) if err == nil { - t.Fatalf("Expected failure requesting already allocated IP: %+v", err) + t.Fatalf("Expected failure requesting already assigned IP: %+v", err) } } -func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) { +func TestIPAMFailToGetIPWhenAllIPsAreAssigned(t *testing.T) 
{ svc := getTestService() - // set state as already allocated + // set state as already assigned state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) state2, _ := NewPodStateWithOrchestratorContext(testIP2, testPod2GUID, testNCID, types.Assigned, 24, 0, testPod2Info) @@ -358,7 +358,7 @@ func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) { t.Fatalf("Expected to not fail adding IPs to state: %+v", err) } - // request the already allocated ip with a new context + // request the already assigned ip with a new context req := cns.IPConfigRequest{} b, _ := testPod3Info.OrchestratorContext() req.OrchestratorContext = b @@ -376,7 +376,7 @@ func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) { func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { svc := getTestService() - // set state as already allocated + // set state as already assigned state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -389,7 +389,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { desiredIpAddress := testIP1 - // Use TestPodInfo2 to request TestIP1, which has already been allocated + // Use TestPodInfo2 to request TestIP1, which has already been assigned req := cns.IPConfigRequest{ PodInterfaceID: testPod2Info.InterfaceID(), InfraContainerID: testPod2Info.InfraContainerID(), @@ -435,7 +435,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) { func TestIPAMReleaseIPIdempotency(t *testing.T) { svc := getTestService() - // set state as already allocated + // set state as already assigned state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -461,7 +461,7 @@ func TestIPAMReleaseIPIdempotency(t *testing.T) { func 
TestIPAMAllocateIPIdempotency(t *testing.T) { svc := getTestService() - // set state as already allocated + // set state as already assigned state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ @@ -501,9 +501,9 @@ func TestAvailableIPConfigs(t *testing.T) { availableIps := svc.GetAvailableIPConfigs() validateIpState(t, availableIps, desiredAvailableIps) - desiredAllocatedIpConfigs := make(map[string]cns.IPConfigurationStatus) - allocatedIps := svc.GetAssignedIPConfigs() - validateIpState(t, allocatedIps, desiredAllocatedIpConfigs) + desiredAssignedIPConfigs := make(map[string]cns.IPConfigurationStatus) + assignedIPs := svc.GetAssignedIPConfigs() + validateIpState(t, assignedIPs, desiredAssignedIPConfigs) req := cns.IPConfigRequest{ PodInterfaceID: testPod1Info.InterfaceID(), @@ -524,9 +524,9 @@ func TestAvailableIPConfigs(t *testing.T) { desiredState := NewPodState(testIP1, 24, testPod1GUID, testNCID, types.Assigned, 0) desiredState.PodInfo = testPod1Info - desiredAllocatedIpConfigs[desiredState.ID] = desiredState - allocatedIps = svc.GetAssignedIPConfigs() - validateIpState(t, allocatedIps, desiredAllocatedIpConfigs) + desiredAssignedIPConfigs[desiredState.ID] = desiredState + assignedIPs = svc.GetAssignedIPConfigs() + validateIpState(t, assignedIPs, desiredAssignedIPConfigs) } func validateIpState(t *testing.T, actualIps []cns.IPConfigurationStatus, expectedList map[string]cns.IPConfigurationStatus) { @@ -552,7 +552,7 @@ func validateIpState(t *testing.T, actualIps []cns.IPConfigurationStatus, expect func TestIPAMMarkIPCountAsPending(t *testing.T) { svc := getTestService() - // set state as already allocated + // set state as already assigned state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Available, 24, 0, testPod1Info) ipconfigs := map[string]cns.IPConfigurationStatus{ state1.ID: state1, @@ -694,7 +694,7 @@ 
func constructSecondaryIPConfigs(ipAddress, uuid string, ncVersion int, secondar func TestIPAMMarkExistingIPConfigAsPending(t *testing.T) { svc := getTestService() - // Add already allocated pod ip to state + // Add already assigned pod ip to state svc.PodIPIDByPodInterfaceKey[testPod1Info.Key()] = testPod1GUID state1, _ := NewPodStateWithOrchestratorContext(testIP1, testPod1GUID, testNCID, types.Assigned, 24, 0, testPod1Info) state2 := NewPodState(testIP2, 24, testPod2GUID, testNCID, types.Available, 0) @@ -720,15 +720,15 @@ func TestIPAMMarkExistingIPConfigAsPending(t *testing.T) { t.Fatalf("Expected to see ID %v in pending release ipconfigs, actual %+v", testPod2GUID, pendingIPConfigs) } - // attempt to mark allocated ipconfig as pending, expect fail + // attempt to mark assigned ipconfig as pending, expect fail pendingIPIDs = []string{testPod1GUID} err = svc.MarkExistingIPsAsPending(pendingIPIDs) if err == nil { - t.Fatalf("Expected to fail when marking allocated ip as pending") + t.Fatalf("Expected to fail when marking assigned ip as pending") } - allocatedIPConfigs := svc.GetAssignedIPConfigs() - if allocatedIPConfigs[0].ID != testPod1GUID { - t.Fatalf("Expected to see ID %v in pending release ipconfigs, actual %+v", testPod1GUID, allocatedIPConfigs) + assignedIPConfigs := svc.GetAssignedIPConfigs() + if assignedIPConfigs[0].ID != testPod1GUID { + t.Fatalf("Expected to see ID %v in pending release ipconfigs, actual %+v", testPod1GUID, assignedIPConfigs) } } diff --git a/cns/restserver/metrics.go b/cns/restserver/metrics.go index 67d4b1250b..91988348df 100644 --- a/cns/restserver/metrics.go +++ b/cns/restserver/metrics.go @@ -22,10 +22,10 @@ var httpRequestLatency = prometheus.NewHistogramVec( []string{"url", "verb"}, ) -var ipAllocationLatency = prometheus.NewHistogram( +var ipAssignmentLatency = prometheus.NewHistogram( prometheus.HistogramOpts{ - Name: "ip_allocation_latency_seconds", - Help: "IP allocation latency in seconds", + Name: 
"ip_assignment_latency_seconds", + Help: "Pod IP assignment latency in seconds", //nolint:gomnd // default bucket consts Buckets: prometheus.ExponentialBuckets(0.001, 2, 15), // 1 ms to ~16 seconds }, @@ -34,7 +34,7 @@ var ipAllocationLatency = prometheus.NewHistogram( func init() { metrics.Registry.MustRegister( httpRequestLatency, - ipAllocationLatency, + ipAssignmentLatency, ) } diff --git a/cns/restserver/restserver.go b/cns/restserver/restserver.go index 34cd95ee5e..d2e3ae8096 100644 --- a/cns/restserver/restserver.go +++ b/cns/restserver/restserver.go @@ -56,7 +56,7 @@ type HTTPRestService struct { routingTable *routes.RoutingTable store store.KeyValueStore state *httpRestServiceState - podsPendingIPAllocation *bounded.TimedSet + podsPendingIPAssignment *bounded.TimedSet sync.RWMutex dncPartitionKey string } @@ -157,7 +157,7 @@ func NewHTTPRestService(config *common.ServiceConfig, wscli interfaceGetter, nma PodIPConfigState: podIPConfigState, routingTable: routingTable, state: serviceState, - podsPendingIPAllocation: bounded.NewTimedSet(250), // nolint:gomnd // maxpods + podsPendingIPAssignment: bounded.NewTimedSet(250), // nolint:gomnd // maxpods }, nil } diff --git a/cns/restserver/util.go b/cns/restserver/util.go index 74c0345408..21b4c71c90 100644 --- a/cns/restserver/util.go +++ b/cns/restserver/util.go @@ -224,9 +224,9 @@ func (service *HTTPRestService) updateIPConfigsStateUntransacted( for ipID := range tobeDeletedIPConfigs { ipConfigStatus, exists := service.PodIPConfigState[ipID] if exists { - // pod ip exists, validate if state is not allocated, else fail + // pod ip exists, validate if state is not assigned, else fail if ipConfigStatus.State == types.Assigned { - errMsg := fmt.Sprintf("Failed to delete an Allocated IP %v", ipConfigStatus) + errMsg := fmt.Sprintf("Failed to delete an Assigned IP %v", ipConfigStatus) return types.InconsistentIPConfigState, errMsg } } @@ -320,9 +320,9 @@ func (service *HTTPRestService) 
removeToBeDeletedIPStateUntransacted( if !skipValidation { ipConfigStatus, exists := service.PodIPConfigState[ipID] if exists { - // pod ip exists, validate if state is not allocated, else fail + // pod ip exists, validate if state is not assigned, else fail if ipConfigStatus.State == types.Assigned { - errMsg := fmt.Sprintf("Failed to delete an Allocated IP %v", ipConfigStatus) + errMsg := fmt.Sprintf("Failed to delete an Assigned IP %v", ipConfigStatus) return types.InconsistentIPConfigState, errMsg } } diff --git a/cns/service/main.go b/cns/service/main.go index 06b8749b9b..19b614d928 100644 --- a/cns/service/main.go +++ b/cns/service/main.go @@ -240,7 +240,7 @@ var args = acn.ArgumentList{ { Name: acn.OptDebugCmd, Shorthand: acn.OptDebugCmdAlias, - Description: "Debug flag to retrieve IPconfigs, available values: allocated, available, all", + Description: "Debug flag to retrieve IPconfigs, available values: assigned, available, all", Type: "string", DefaultValue: "", }, diff --git a/cns/singletenantcontroller/metrics.go b/cns/singletenantcontroller/metrics.go index d36f5b0b06..9108f87408 100644 --- a/cns/singletenantcontroller/metrics.go +++ b/cns/singletenantcontroller/metrics.go @@ -6,7 +6,7 @@ import ( ) var ( - assignedIPs = prometheus.NewGauge( + allocatedIPs = prometheus.NewGauge( prometheus.GaugeOpts{ Name: "allocated_ips", Help: "Allocated IP count.", @@ -28,7 +28,7 @@ var ( func init() { metrics.Registry.MustRegister( - assignedIPs, + allocatedIPs, requestedIPs, unusedIPs, ) diff --git a/cns/singletenantcontroller/reconciler.go b/cns/singletenantcontroller/reconciler.go index 748196c4dc..0932e4924c 100644 --- a/cns/singletenantcontroller/reconciler.go +++ b/cns/singletenantcontroller/reconciler.go @@ -83,7 +83,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (reco r.ipampoolmonitorcli.Update(nnc) // record assigned IPs metric - assignedIPs.Set(float64(len(nnc.Status.NetworkContainers[0].IPAssignments))) + 
allocatedIPs.Set(float64(len(nnc.Status.NetworkContainers[0].IPAssignments))) return reconcile.Result{}, nil } diff --git a/cns/types/ipconfig.go b/cns/types/ipconfig.go index 69541d6b84..bcf405f18b 100644 --- a/cns/types/ipconfig.go +++ b/cns/types/ipconfig.go @@ -1,16 +1,16 @@ package types // IPState defines the possible states an IP can be in for CNS IPAM. +// The total pool that CNS has access to are its "allocated" IPs. type IPState string const ( - // Available IPConfigState for IPs available for CNS to use. - // The total pool that CNS has access to are its "allocated" IPs. + // Available IPConfigState for allocated IPs available for CNS to use. Available IPState = "Available" - // Assigned IPConfigState for IPs that CNS has assigned to Pods. + // Assigned IPConfigState for allocated IPs that CNS has assigned to Pods. Assigned IPState = "Assigned" - // PendingRelease IPConfigState for IPs pending release. + // PendingRelease IPConfigState for allocated IPs pending deallocation. PendingRelease IPState = "PendingRelease" - // PendingProgramming IPConfigState for IPs pending programming. + // PendingProgramming IPConfigState for allocated IPs pending programming. PendingProgramming IPState = "PendingProgramming" )