Merged
Changes from all commits
4 changes: 2 additions & 2 deletions cns/NetworkContainerContract.go
@@ -371,7 +371,7 @@ type IPConfigResponse struct {
}

// GetIPAddressesRequest is used in CNS IPAM mode to get the states of IPConfigs
// The IPConfigStateFilter is a slice of IP's to fetch from CNS that match those states
// The IPConfigStateFilter is a slice of IPs to fetch from CNS that match those states
type GetIPAddressesRequest struct {
IPConfigStateFilter []IPConfigState
}
@@ -394,7 +394,7 @@ type GetPodContextResponse struct {
Response Response
}

// IPAddressState Only used in the GetIPConfig API to return IP's that match a filter
// IPAddressState Only used in the GetIPConfig API to return IPs that match a filter
type IPAddressState struct {
IPAddress string
State string
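For context on the contract above, a minimal, self-contained sketch of how a caller might build a state-filtered request and read back the per-IP results. The stand-in types mirror the structs shown in this diff; the concrete state value "Available" is an assumption for illustration, not something this change defines.

package main

import "fmt"

// Stand-in types mirroring cns/NetworkContainerContract.go (sketch only;
// the real definitions live in the cns package).
type IPConfigState string

type GetIPAddressesRequest struct {
	IPConfigStateFilter []IPConfigState
}

type IPAddressState struct {
	IPAddress string
	State     string
}

func main() {
	// Ask for only the IPs whose state matches the filter; "Available" is an
	// assumed state name used purely for illustration.
	req := GetIPAddressesRequest{IPConfigStateFilter: []IPConfigState{"Available"}}

	// Each entry in the response pairs an IP address with the state it matched.
	results := []IPAddressState{{IPAddress: "10.240.0.10", State: "Available"}}

	fmt.Printf("filter: %v, results: %v\n", req.IPConfigStateFilter, results)
}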
12 changes: 7 additions & 5 deletions cns/ipampool/monitor.go
@@ -140,9 +140,9 @@ func (pm *Monitor) reconcile(ctx context.Context) error {
return pm.decreasePoolSize(ctx, pendingReleaseIPCount)

// CRD has reconciled CNS state, and target spec is now the same size as the state
// free to remove the IP's from the CRD
// free to remove the IPs from the CRD
case len(pm.spec.IPsNotInUse) != pendingReleaseIPCount:
logger.Printf("[ipam-pool-monitor] Removing Pending Release IP's from CRD...%s ", msg)
logger.Printf("[ipam-pool-monitor] Removing Pending Release IPs from CRD...%s ", msg)
return pm.cleanPendingRelease(ctx)

// no pods scheduled
@@ -175,7 +175,8 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error {
return nil
}

logger.Printf("[ipam-pool-monitor] Increasing pool size, Current Pool Size: %v, Updated Requested IP Count: %v, Pods with IP's:%v, ToBeDeleted Count: %v", len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse))
logger.Printf("[ipam-pool-monitor] Increasing pool size, Current Pool Size: %v, Updated Requested IP Count: %v, Pods with IPs:%v, ToBeDeleted Count: %v",
len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse))

if _, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec); err != nil {
// caller will retry to update the CRD again
@@ -191,7 +192,7 @@ func (pm *Monitor) increasePoolSize(ctx context.Context) error {
}

func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseIPCount int) error {
// mark n number of IP's as pending
// mark n number of IPs as pending
var newIpsMarkedAsPending bool
var pendingIPAddresses map[string]cns.IPConfigurationStatus
var updatedRequestedIPCount int64
@@ -241,7 +242,8 @@ func (pm *Monitor) decreasePoolSize(ctx context.Context, existingPendingReleaseI
len(pendingIPAddresses), pm.state.notInUseCount)

tempNNCSpec.RequestedIPCount -= int64(len(pendingIPAddresses))
logger.Printf("[ipam-pool-monitor] Decreasing pool size, Current Pool Size: %v, Requested IP Count: %v, Pods with IP's: %v, ToBeDeleted Count: %v", len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse))
logger.Printf("[ipam-pool-monitor] Decreasing pool size, Current Pool Size: %v, Requested IP Count: %v, Pods with IPs: %v, ToBeDeleted Count: %v",
len(pm.httpService.GetPodIPConfigState()), tempNNCSpec.RequestedIPCount, len(pm.httpService.GetAllocatedIPConfigs()), len(tempNNCSpec.IPsNotInUse))

_, err := pm.nnccli.UpdateSpec(ctx, &tempNNCSpec)
if err != nil {
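As a rough illustration of the scale-up/scale-down decision that the reconcile comments above describe, here is a self-contained sketch. The threshold formula (free IPs compared against percentages of the batch size) and all names are assumptions inferred from the test parameters in this PR, not the monitor's actual implementation.

package main

import "fmt"

// decideRequestedIPCount sketches a batch-based pool decision: grow when the
// number of free IPs drops below the request threshold, shrink when it rises
// above the release threshold, otherwise leave the requested count alone.
func decideRequestedIPCount(requested, allocated, batch, requestPct, releasePct int64) int64 {
	free := requested - allocated
	switch {
	case free*100 < batch*requestPct:
		return requested + batch // pool is running low: request one more batch
	case free*100 > batch*releasePct:
		return requested - batch // pool is over-provisioned: release one batch
	default:
		return requested // within thresholds: no change
	}
}

func main() {
	// With a batch of 16, a 50% request threshold and a 150% release threshold
	// (the values used in monitor_test.go), 10 allocated out of 16 leaves 6 free,
	// below 8 (50% of 16), so the sketch scales the pool up to 32.
	fmt.Println(decideRequestedIPCount(16, 10, 16, 50, 150))
}

In the real monitor the new count is written back to the NodeNetworkConfig spec via nnccli.UpdateSpec, as the diff above shows.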
18 changes: 9 additions & 9 deletions cns/ipampool/monitor_test.go
@@ -81,7 +81,7 @@ func TestPoolSizeIncrease(t *testing.T) {
// ensure pool monitor has reached quorum with cns
assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount)

// request controller reconciles, carves new IP's from the test subnet and adds to CNS state
// request controller reconciles, carves new IPs from the test subnet and adds to CNS state
assert.NoError(t, fakerc.Reconcile(true))

// when poolmonitor reconciles again here, the IP count will be within the thresholds
Expand Down Expand Up @@ -111,7 +111,7 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) {
// When poolmonitor reconcile is called, trigger increase and cache goal state
assert.NoError(t, poolmonitor.reconcile(context.Background()))

// increase number of allocated IP's in CNS, within allocatable size but still inside trigger threshold
// increase number of allocated IPs in CNS, within allocatable size but still inside trigger threshold
assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(9))

// poolmonitor reconciles, but doesn't actually update the CRD, because there is already a pending update
@@ -120,7 +120,7 @@ func TestPoolIncreaseDoesntChangeWhenIncreaseIsAlreadyInProgress(t *testing.T) {
// ensure pool monitor has reached quorum with cns
assert.Equal(t, int64(initState.ipConfigCount+(1*initState.batchSize)), poolmonitor.spec.RequestedIPCount)

// request controller reconciles, carves new IP's from the test subnet and adds to CNS state
// request controller reconciles, carves new IPs from the test subnet and adds to CNS state
assert.NoError(t, fakerc.Reconcile(true))

// when poolmonitor reconciles again here, the IP count will be within the thresholds
@@ -213,10 +213,10 @@ func TestPoolDecrease(t *testing.T) {
fakecns, fakerc, poolmonitor := initFakes(initState)
assert.NoError(t, fakerc.Reconcile(true))

// Pool monitor does nothing, as the current number of IP's falls in the threshold
// Pool monitor does nothing, as the current number of IPs falls in the threshold
assert.NoError(t, poolmonitor.reconcile(context.Background()))

// Decrease the number of allocated IP's down to 5. This should trigger a scale down
// Decrease the number of allocated IPs down to 5. This should trigger a scale down
assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(4))

// Pool monitor will adjust the spec so the pool size will be 1 batch size smaller
@@ -247,7 +247,7 @@ func TestPoolSizeDecreaseWhenDecreaseHasAlreadyBeenRequested(t *testing.T) {
fakecns, fakerc, poolmonitor := initFakes(initState)
assert.NoError(t, fakerc.Reconcile(true))

// Pool monitor does nothing, as the current number of IP's falls in the threshold
// Pool monitor does nothing, as the current number of IPs falls in the threshold
assert.NoError(t, poolmonitor.reconcile(context.Background()))

// Ensure the size of the requested spec is still the same
@@ -331,13 +331,13 @@ func TestPoolSizeDecreaseToReallyLow(t *testing.T) {
fakecns, fakerc, poolmonitor := initFakes(initState)
assert.NoError(t, fakerc.Reconcile(true))

// Pool monitor does nothing, as the current number of IP's falls in the threshold
// Pool monitor does nothing, as the current number of IPs falls in the threshold
assert.NoError(t, poolmonitor.reconcile(context.Background()))

// Now Drop the Allocated count to really low, say 3. This should trigger release in 2 batches
assert.NoError(t, fakecns.SetNumberOfAllocatedIPs(3))

// Pool monitor does nothing, as the current number of IP's falls in the threshold
// Pool monitor does nothing, as the current number of IPs falls in the threshold
assert.NoError(t, poolmonitor.reconcile(context.Background()))

// Ensure the size of the requested spec is still the same
@@ -363,7 +363,7 @@ func TestDecreaseAfterNodeLimitReached(t *testing.T) {
func TestDecreaseAfterNodeLimitReached(t *testing.T) {
initState := state{
batchSize: 16,
allocatedIPCount: 20,
allocatedIPCount: 30,
ipConfigCount: 30,
requestThresholdPercent: 50,
releaseThresholdPercent: 150,
2 changes: 1 addition & 1 deletion cns/restserver/internalapi.go
@@ -258,7 +258,7 @@ func (service *HTTPRestService) ReconcileNCState(

err := service.MarkExistingIPsAsPending(nnc.Spec.IPsNotInUse)
if err != nil {
logger.Errorf("[Azure CNS] Error. Failed to mark IP's as pending %v", nnc.Spec.IPsNotInUse)
logger.Errorf("[Azure CNS] Error. Failed to mark IPs as pending %v", nnc.Spec.IPsNotInUse)
return types.UnexpectedError
}

28 changes: 14 additions & 14 deletions cns/restserver/ipam_test.go
@@ -196,7 +196,7 @@ func TestIPAMGetNextAvailableIPConfig(t *testing.T) {
}
err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

req := cns.IPConfigRequest{
Expand Down Expand Up @@ -228,7 +228,7 @@ func TestIPAMGetAlreadyAllocatedIPConfigForSamePod(t *testing.T) {
}
err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

req := cns.IPConfigRequest{
@@ -261,7 +261,7 @@ func TestIPAMAttemptToRequestIPNotFoundInPool(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

req := cns.IPConfigRequest{
@@ -289,7 +289,7 @@ func TestIPAMGetDesiredIPConfigWithSpecfiedIP(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

req := cns.IPConfigRequest{
@@ -323,7 +323,7 @@ func TestIPAMFailToGetDesiredIPConfigWithAlreadyAllocatedSpecfiedIP(t *testing.T
}
err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

// request the already allocated ip with a new context
@@ -354,7 +354,7 @@ func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) {
}
err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

// request the already allocated ip with a new context
@@ -364,7 +364,7 @@ func TestIPAMFailToGetIPWhenAllIPsAreAllocated(t *testing.T) {

_, err = requestIpAddressAndGetState(t, req)
if err == nil {
t.Fatalf("Expected failure requesting IP when there are no more IP's: %+v", err)
t.Fatalf("Expected failure requesting IP when there are no more IPs: %+v", err)
}
}

@@ -383,7 +383,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

desiredIpAddress := testIP1
@@ -399,7 +399,7 @@ func TestIPAMRequestThenReleaseThenRequestAgain(t *testing.T) {

_, err = requestIpAddressAndGetState(t, req)
if err == nil {
t.Fatal("Expected failure requesting IP when there are no more IP's")
t.Fatal("Expected failure requesting IP when there are no more IPs")
}

// Release Test Pod 1
@@ -442,7 +442,7 @@ func TestIPAMReleaseIPIdempotency(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

// Release Test Pod 1
@@ -469,12 +469,12 @@ func TestIPAMAllocateIPIdempotency(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

err = UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}
}

@@ -559,7 +559,7 @@ func TestIPAMMarkIPCountAsPending(t *testing.T) {

err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

// Release Test Pod 1
@@ -704,7 +704,7 @@ func TestIPAMMarkExistingIPConfigAsPending(t *testing.T) {
}
err := UpdatePodIpConfigState(t, svc, ipconfigs)
if err != nil {
t.Fatalf("Expected to not fail adding IP's to state: %+v", err)
t.Fatalf("Expected to not fail adding IPs to state: %+v", err)
}

// mark available ip as pending