diff --git a/common/ioshim_linux.go b/common/ioshim_linux.go index 1a5ed94b4c..48f895c1cd 100644 --- a/common/ioshim_linux.go +++ b/common/ioshim_linux.go @@ -1,8 +1,11 @@ package common import ( + "testing" + testutils "github.com/Azure/azure-container-networking/test/utils" utilexec "k8s.io/utils/exec" + testingexec "k8s.io/utils/exec/testing" ) type IOShim struct { @@ -20,3 +23,11 @@ func NewMockIOShim(calls []testutils.TestCmd) *IOShim { Exec: testutils.GetFakeExecWithScripts(calls), } } + +// VerifyCalls is used for Unit Testing with a mock ioshim. It asserts that the number of calls made is equal to the number given to the mock ioshim. +func (ioshim *IOShim) VerifyCalls(t *testing.T, calls []testutils.TestCmd) { + fexec, couldCast := ioshim.Exec.(*testingexec.FakeExec) + if couldCast { + testutils.VerifyCalls(t, fexec, calls) + } +} diff --git a/npm/pkg/dataplane/ioutil/grep.go b/npm/pkg/dataplane/ioutil/grep.go new file mode 100644 index 0000000000..e2e13f3602 --- /dev/null +++ b/npm/pkg/dataplane/ioutil/grep.go @@ -0,0 +1,35 @@ +package ioutil + +import utilexec "k8s.io/utils/exec" + +// Grep is the grep command string +const Grep = "grep" + +func PipeCommandToGrep(command, grepCommand utilexec.Cmd) (searchResults []byte, gotMatches bool, commandError error) { + pipe, commandError := command.StdoutPipe() + if commandError != nil { + return + } + closePipe := func() { _ = pipe.Close() } // appease go lint + defer closePipe() + + grepCommand.SetStdin(pipe) + commandError = command.Start() + if commandError != nil { + return + } + + // Without this wait, defunct iptable child process are created + wait := func() { _ = command.Wait() } // appease go lint + defer wait() + + output, err := grepCommand.CombinedOutput() + if err != nil { + // grep returns err status 1 if nothing is found + // but the other command's exit status gets propagated through this CombinedOutput, so we might have errors undetected + return + } + searchResults = output + gotMatches = true + 
return +} diff --git a/npm/pkg/dataplane/ioutil/grep_test.go b/npm/pkg/dataplane/ioutil/grep_test.go new file mode 100644 index 0000000000..4ea48e1337 --- /dev/null +++ b/npm/pkg/dataplane/ioutil/grep_test.go @@ -0,0 +1,92 @@ +package ioutil + +import ( + "testing" + + testutils "github.com/Azure/azure-container-networking/test/utils" + "github.com/stretchr/testify/require" +) + +func TestGrepMatch(t *testing.T) { + calls := []testutils.TestCmd{ + { + Cmd: []string{"some", "command"}, + PipedToCommand: true, + }, + { + Cmd: []string{"grep", "pattern"}, + Stdout: "1: here's the pattern we're looking for", + }, + { + Cmd: []string{"some", "command"}, + PipedToCommand: true, + }, + { + Cmd: []string{"grep", "pattern"}, + Stdout: "2: here's the pattern we're looking for", + }, + } + fexec := testutils.GetFakeExecWithScripts(calls) + defer testutils.VerifyCalls(t, fexec, calls) + + someCommand := fexec.Command("some", "command") + grepCommand := fexec.Command(Grep, "pattern") + output, gotMatches, err := PipeCommandToGrep(someCommand, grepCommand) + require.NoError(t, err) + require.True(t, gotMatches) + require.Equal(t, "1: here's the pattern we're looking for", string(output)) + + someCommand = fexec.Command("some", "command") + grepCommand = fexec.Command(Grep, "pattern") + output, gotMatches, err = PipeCommandToGrep(someCommand, grepCommand) + require.NoError(t, err) + require.True(t, gotMatches) + require.Equal(t, "2: here's the pattern we're looking for", string(output)) +} + +func TestGrepNoMatch(t *testing.T) { + calls := []testutils.TestCmd{ + { + Cmd: []string{"some", "command"}, + PipedToCommand: true, + ExitCode: 0, + }, + { + Cmd: []string{"grep", "pattern"}, + ExitCode: 1, + }, + } + fexec := testutils.GetFakeExecWithScripts(calls) + defer testutils.VerifyCalls(t, fexec, calls) + + someCommand := fexec.Command("some", "command") + grepCommand := fexec.Command(Grep, "pattern") + output, gotMatches, err := PipeCommandToGrep(someCommand, grepCommand) + 
require.NoError(t, err) + require.False(t, gotMatches) + require.Nil(t, output) +} + +func TestCommandStartError(t *testing.T) { + calls := []testutils.TestCmd{ + { + Cmd: []string{"some", "command"}, + HasStartError: true, + PipedToCommand: true, + ExitCode: 5, + }, + { + Cmd: []string{"grep", "pattern"}, + ExitCode: 0, + }, + } + fexec := testutils.GetFakeExecWithScripts(calls) + defer testutils.VerifyCalls(t, fexec, calls) + + someCommand := fexec.Command("some", "command") + grepCommand := fexec.Command(Grep, "pattern") + output, gotMatches, err := PipeCommandToGrep(someCommand, grepCommand) + require.Error(t, err) + require.False(t, gotMatches) + require.Nil(t, output) +} diff --git a/npm/pkg/dataplane/ioutil/file-creator.go b/npm/pkg/dataplane/ioutil/restore_linux.go similarity index 59% rename from npm/pkg/dataplane/ioutil/file-creator.go rename to npm/pkg/dataplane/ioutil/restore_linux.go index 0ef78c9ddc..666889d3a7 100644 --- a/npm/pkg/dataplane/ioutil/file-creator.go +++ b/npm/pkg/dataplane/ioutil/restore_linux.go @@ -8,12 +8,10 @@ import ( "strings" "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/log" npmerrors "github.com/Azure/azure-container-networking/npm/util/errors" + "k8s.io/klog" ) -// TODO add file creator log prefix - // FileCreator is a tool for: // - building a buffer file // - running a command with the file @@ -56,19 +54,24 @@ type ErrorDefinition struct { type LineErrorHandler struct { Definition *ErrorDefinition Method LineErrorHandlerMethod - Reason string Callback func() } // LineErrorHandlerMethod defines behavior when an error occurs type LineErrorHandlerMethod string -// possible LineErrorHandlerMethod const ( - SkipLine LineErrorHandlerMethod = "skip" - AbortSection LineErrorHandlerMethod = "abort" + // Continue specifies skipping this line and all previous lines + Continue LineErrorHandlerMethod = "continue" + // ContinueAndAbortSection specifies skipping this line, all previous 
lines, and all lines tied to this line's section + ContinueAndAbortSection LineErrorHandlerMethod = "continue-and-abort" + + anyMatchPattern = ".*" ) +// AlwaysMatchDefinition will match any error +var AlwaysMatchDefinition = NewErrorDefinition(anyMatchPattern) + func NewFileCreator(ioShim *common.IOShim, maxTryCount int, lineFailurePatterns ...string) *FileCreator { creator := &FileCreator{ lines: make([]*Line, 0), @@ -125,27 +128,26 @@ func (creator *FileCreator) RunCommandWithFile(cmd string, args ...string) error fileString := creator.ToString() wasFileAltered, err := creator.runCommandOnceWithFile(fileString, cmd, args...) if err == nil { - // success return nil } - for { - commandString := cmd + " " + strings.Join(args, " ") - if creator.hasNoMoreRetries() { - // TODO conditionally specify as retriable? - return npmerrors.Errorf(npmerrors.RunFileCreator, false, fmt.Sprintf("failed to run command [%s] with error: %v", commandString, err)) - } + commandString := cmd + " " + strings.Join(args, " ") + for !creator.hasNoMoreRetries() { if wasFileAltered { fileString = creator.ToString() - log.Logf("rerunning command [%s] with new file:\n%s", commandString, fileString) + klog.Infof("rerunning command [%s] with new file:\n%s", commandString, fileString) } else { - log.Logf("rerunning command [%s] with the same file", commandString) + klog.Infof("rerunning command [%s] with the same file", commandString) } wasFileAltered, err = creator.runCommandOnceWithFile(fileString, cmd, args...) if err == nil { - // success + klog.Infof("successfully ran command [%s] on try number %d", commandString, creator.tryCount) return nil } } + errString := fmt.Sprintf("failed to run command [%s] with error: %v", commandString, err) + klog.Error(errString) + // TODO conditionally specify as retriable? + return npmerrors.Errorf(npmerrors.RunFileCreator, false, errString) } // RunCommandOnceWithFile runs the command with the file once and increments the try count. 
@@ -160,8 +162,15 @@ func (creator *FileCreator) RunCommandOnceWithFile(cmd string, args ...string) ( return creator.runCommandOnceWithFile(fileString, cmd, args...) } +// returns whether the file was altered and any error // TODO return another bool that specifies if there was a file-level retriable error? func (creator *FileCreator) runCommandOnceWithFile(fileString, cmd string, args ...string) (bool, error) { + commandString := cmd + " " + strings.Join(args, " ") + if fileString == "" { // NOTE this wouldn't prevent us from running an iptables restore file with just "COMMIT\n" + klog.Infof("returning as a success without running command [%s] since the fileString is empty", commandString) + return false, nil + } + command := creator.ioShim.Exec.Command(cmd, args...) command.SetStdin(bytes.NewBufferString(fileString)) @@ -173,27 +182,30 @@ func (creator *FileCreator) runCommandOnceWithFile(fileString, cmd string, args } creator.tryCount++ - commandString := cmd + " " + strings.Join(args, " ") stdErr := string(stdErrBytes) - log.Errorf("on try number %d, failed to run command [%s] with error [%v] and stdErr [%s]. Used file:\n%s", creator.tryCount, commandString, err, stdErr, fileString) + klog.Errorf("on try number %d, failed to run command [%s] with error [%v] and stdErr [%s]. 
Used file:\n%s", creator.tryCount, commandString, err, stdErr, fileString) if creator.hasNoMoreRetries() { return false, fmt.Errorf("after %d tries, failed with final error [%w] and stdErr [%s]", creator.tryCount, err, stdErr) } // begin the retry logic if creator.hasFileLevelError(stdErr) { - log.Logf("detected a file-level error after running command [%s]", commandString) + klog.Infof("detected a file-level error after running command [%s]", commandString) return false, fmt.Errorf("file-level error: %w", err) } // no file-level error, so handle line-level error if there is one - lineNum := creator.getErrorLineNumber(commandString, stdErr) - if lineNum == -1 { - // can't detect a line number error - return false, fmt.Errorf("can't discern error: %w", err) + numLines := creator.numLines() + for _, lineFailureDefinition := range creator.lineFailureDefinitions { + lineNum := lineFailureDefinition.getErrorLineNumber(stdErr, commandString, numLines) + if lineNum != -1 { + klog.Infof("detected a line number error on line %d", lineNum) + wasFileAltered := creator.handleLineError(stdErr, commandString, lineNum) + return wasFileAltered, fmt.Errorf("tried to handle line number error: %w", err) + } } - wasFileAltered := creator.handleLineError(lineNum, commandString, stdErr) - return wasFileAltered, fmt.Errorf("tried to handle line number error: %w", err) + klog.Infof("couldn't detect a line number error") + return false, fmt.Errorf("can't discern error: %w", err) } func (creator *FileCreator) hasNoMoreRetries() bool { @@ -210,34 +222,38 @@ func (creator *FileCreator) hasFileLevelError(stdErr string) bool { } func (definition *ErrorDefinition) isMatch(stdErr string) bool { - return definition.re.MatchString(stdErr) + return definition.matchPattern == anyMatchPattern || definition.re.MatchString(stdErr) +} + +func (creator *FileCreator) numLines() int { + return len(creator.lines) - len(creator.lineNumbersToOmit) } // return -1 if there's a failure -func (creator 
*FileCreator) getErrorLineNumber(commandString, stdErr string) int { - for _, definition := range creator.lineFailureDefinitions { - result := definition.re.FindStringSubmatch(stdErr) - if result == nil || len(result) < 2 { - log.Logf("expected error with line number, but couldn't detect one with error regex pattern [%s] for command [%s] with stdErr [%s]", definition.matchPattern, commandString, stdErr) - continue - } - lineNumString := result[1] - lineNum, err := strconv.Atoi(lineNumString) - if err != nil { - log.Logf("expected error with line number, but error regex pattern %s didn't produce a number for command [%s] with stdErr [%s]", definition.matchPattern, commandString, stdErr) - continue - } - if lineNum < 1 || lineNum > len(creator.lines) { - log.Logf("expected error with line number, but error regex pattern %s produced an invalid line number %d for command [%s] with stdErr [%s]", definition.matchPattern, lineNum, commandString, stdErr) - continue - } - return lineNum +func (definition *ErrorDefinition) getErrorLineNumber(stdErr, commandString string, numLines int) int { + result := definition.re.FindStringSubmatch(stdErr) + if result == nil || len(result) < 2 { + klog.Infof("expected error with line number, but couldn't detect one with error regex pattern [%s] for command [%s] with stdErr [%s]", definition.matchPattern, commandString, stdErr) + return -1 + } + lineNumString := result[1] + lineNum, err := strconv.Atoi(lineNumString) + if err != nil { + klog.Infof("expected error with line number, but error regex pattern %s didn't produce a number for command [%s] with stdErr [%s]", definition.matchPattern, commandString, stdErr) + return -1 + } + if lineNum < 1 || lineNum > numLines { + klog.Infof( + "expected error with line number, but error regex pattern %s produced an invalid line number %d for command [%s] with stdErr [%s]", + definition.matchPattern, lineNum, commandString, stdErr, + ) + return -1 } - return -1 + return lineNum } // return whether 
the file was altered -func (creator *FileCreator) handleLineError(lineNum int, commandString, stdErr string) bool { +func (creator *FileCreator) handleLineError(stdErr, commandString string, lineNum int) bool { lineNumIndex := lineNum - 1 line := creator.lines[lineNumIndex] for _, errorHandler := range line.errorHandlers { @@ -245,26 +261,24 @@ func (creator *FileCreator) handleLineError(lineNum int, commandString, stdErr s continue } switch errorHandler.Method { - case SkipLine: - log.Errorf("skipping line %d for command [%s]", lineNumIndex, commandString) - creator.lineNumbersToOmit[lineNumIndex] = struct{}{} - errorHandler.Callback() - return true - case AbortSection: - log.Errorf("aborting section associated with line %d for command [%s]", lineNumIndex, commandString) - section, exists := creator.sections[line.sectionID] - if !exists { - log.Errorf("can't abort section because line references section %d which doesn't exist, so skipping the line instead", line.sectionID) - creator.lineNumbersToOmit[lineNumIndex] = struct{}{} - } else { - for _, lineNum := range section.lineNums { - creator.lineNumbersToOmit[lineNum] = struct{}{} - } + case Continue: + klog.Errorf("continuing after line %d for command [%s]", lineNum, commandString) + for i := 0; i <= lineNumIndex; i++ { + creator.lineNumbersToOmit[i] = struct{}{} + } + case ContinueAndAbortSection: + klog.Errorf("continuing after line %d and aborting section associated with the line for command [%s]", lineNum, commandString) + for i := 0; i <= lineNumIndex; i++ { + creator.lineNumbersToOmit[i] = struct{}{} + } + section := creator.sections[line.sectionID] + for _, lineNum := range section.lineNums { + creator.lineNumbersToOmit[lineNum] = struct{}{} } } errorHandler.Callback() return true } - log.Logf("no error handler for line %d for command [%s] with stdErr [%s]", lineNum, commandString, stdErr) + klog.Infof("no error handler for line %d for command [%s] with stdErr [%s]", lineNum, commandString, stdErr) return 
false } diff --git a/npm/pkg/dataplane/ioutil/file-creator_test.go b/npm/pkg/dataplane/ioutil/restore_linux_test.go similarity index 57% rename from npm/pkg/dataplane/ioutil/file-creator_test.go rename to npm/pkg/dataplane/ioutil/restore_linux_test.go index 215a246339..b04c7d2f4b 100644 --- a/npm/pkg/dataplane/ioutil/file-creator_test.go +++ b/npm/pkg/dataplane/ioutil/restore_linux_test.go @@ -1,7 +1,6 @@ package ioutil import ( - "fmt" "testing" "github.com/Azure/azure-container-networking/common" @@ -17,11 +16,15 @@ const ( section2ID = "section2" ) -var fakeSuccessCommand = testutils.TestCmd{ - Cmd: []string{testCommandString}, - Stdout: "success", - ExitCode: 0, -} +var ( + fakeSuccessCommand = testutils.TestCmd{ + Cmd: []string{testCommandString}, + } + fakeFailureCommand = testutils.TestCmd{ + Cmd: []string{testCommandString}, + ExitCode: 1, + } +) func TestToStringAndSections(t *testing.T) { creator := NewFileCreator(common.NewMockIOShim(nil), 1) @@ -51,51 +54,92 @@ line3-item1 line3-item2 line3-item3 func TestRunCommandWithFile(t *testing.T) { calls := []testutils.TestCmd{fakeSuccessCommand} creator := NewFileCreator(common.NewMockIOShim(calls), 1) + creator.AddLine("", nil, "line1") require.NoError(t, creator.RunCommandWithFile(testCommandString)) } -func TestRecoveryForFileLevelError(t *testing.T) { - calls := []testutils.TestCmd{ - { - Cmd: []string{testCommandString}, - Stdout: "file-level error", - ExitCode: 4, - }, - fakeSuccessCommand, - } +func TestRunCommandWhenFileIsEmpty(t *testing.T) { + calls := []testutils.TestCmd{fakeSuccessCommand} + creator := NewFileCreator(common.NewMockIOShim(calls), 1) + wasFileAltered, err := creator.RunCommandOnceWithFile(testCommandString) + require.False(t, wasFileAltered) + require.NoError(t, err) +} + +func TestRunCommandSuccessAfterRecovery(t *testing.T) { + calls := []testutils.TestCmd{fakeFailureCommand, fakeSuccessCommand} creator := NewFileCreator(common.NewMockIOShim(calls), 2) - 
creator.AddErrorToRetryOn(NewErrorDefinition("file-level error")) + creator.AddLine("", nil, "line1") require.NoError(t, creator.RunCommandWithFile(testCommandString)) } -func TestRecoveryForLineError(t *testing.T) { +func TestRunCommandFailureFromNoMoreTries(t *testing.T) { + calls := []testutils.TestCmd{fakeFailureCommand} + creator := NewFileCreator(common.NewMockIOShim(calls), 1) + creator.AddLine("", nil, "line1") + require.Error(t, creator.RunCommandWithFile(testCommandString)) +} + +func TestRunCommandOnceWithNoMoreTries(t *testing.T) { + creator := NewFileCreator(common.NewMockIOShim(nil), 0) + _, err := creator.RunCommandOnceWithFile(testCommandString) + require.Error(t, err) +} + +func TestRecoveryForFileLevelErrors(t *testing.T) { + knownFileLevelErrorCommand := testutils.TestCmd{ + Cmd: []string{testCommandString}, + Stdout: "file-level error over here", + ExitCode: 1, + } + unknownFileLevelErrorCommand := testutils.TestCmd{ + Cmd: []string{testCommandString}, + Stdout: "not sure what's wrong", + ExitCode: 1, + } calls := []testutils.TestCmd{ - { - Cmd: []string{testCommandString}, - Stdout: "failure on line 2", - ExitCode: 4, - }, + knownFileLevelErrorCommand, + unknownFileLevelErrorCommand, + unknownFileLevelErrorCommand, fakeSuccessCommand, } - creator := NewFileCreator(common.NewMockIOShim(calls), 2, "failure on line (\\d+)") + creator := NewFileCreator(common.NewMockIOShim(calls), 4) + creator.AddErrorToRetryOn(NewErrorDefinition("file-level error")) + creator.AddLine("", nil, "line1") + wasFileAltered, err := creator.RunCommandOnceWithFile(testCommandString) + require.False(t, wasFileAltered) + require.Error(t, err) + wasFileAltered, err = creator.RunCommandOnceWithFile(testCommandString) + require.False(t, wasFileAltered) + require.Error(t, err) require.NoError(t, creator.RunCommandWithFile(testCommandString)) } -func TestTotalFailureAfterRetries(t *testing.T) { - errorCommand := testutils.TestCmd{ +func TestRecoveryWhenFileAltered(t *testing.T) 
{ + fakeErrorCommand := testutils.TestCmd{ Cmd: []string{testCommandString}, - Stdout: "some error", - ExitCode: 4, + Stdout: "failure on line 2: match-pattern do something please", + ExitCode: 1, } - calls := []testutils.TestCmd{errorCommand, errorCommand, errorCommand} - creator := NewFileCreator(common.NewMockIOShim(calls), 2) - require.Error(t, creator.RunCommandWithFile(testCommandString)) + calls := []testutils.TestCmd{fakeErrorCommand, fakeSuccessCommand} + creator := NewFileCreator(common.NewMockIOShim(calls), 2, "failure on line (\\d+)") + errorHandlers := []*LineErrorHandler{ + { + Definition: NewErrorDefinition("match-pattern"), + Method: Continue, + Callback: func() { log.Logf("'continue' callback") }, + }, + } + creator.AddLine(section1ID, nil, "line1-item1", "line1-item2", "line1-item3") + creator.AddLine(section2ID, errorHandlers, "line2-item1", "line2-item2", "line2-item3") + creator.AddLine(section1ID, nil, "line3-item1", "line3-item2", "line3-item3") + require.NoError(t, creator.RunCommandWithFile(testCommandString)) } -func TestHandleLineErrorForAbortSection(t *testing.T) { +func TestHandleLineErrorForContinueAndAbortSection(t *testing.T) { fakeErrorCommand := testutils.TestCmd{ Cmd: []string{testCommandString}, - Stdout: "failure on line 1: match-pattern do something please", + Stdout: "failure on line 2: match-pattern do something please", ExitCode: 1, } calls := []testutils.TestCmd{fakeErrorCommand} @@ -104,28 +148,27 @@ func TestHandleLineErrorForAbortSection(t *testing.T) { // first error handler doesn't match (include this to make sure the real match gets reached) { Definition: NewErrorDefinition("abc"), - Method: AbortSection, - Reason: "", + Method: ContinueAndAbortSection, Callback: func() {}, }, { Definition: NewErrorDefinition("match-pattern"), - Method: AbortSection, - Reason: "error requiring us to abort section", - Callback: func() { log.Logf("abort section callback") }, + Method: ContinueAndAbortSection, + Callback: func() { 
log.Logf("'continue and abort section' callback") }, }, } - creator.AddLine(section1ID, errorHandlers, "line1-item1", "line1-item2", "line1-item3") - creator.AddLine(section2ID, nil, "line2-item1", "line2-item2", "line2-item3") + creator.AddLine(section1ID, nil, "line1-item1", "line1-item2", "line1-item3") + creator.AddLine(section2ID, errorHandlers, "line2-item1", "line2-item2", "line2-item3") creator.AddLine(section1ID, nil, "line3-item1", "line3-item2", "line3-item3") + creator.AddLine(section2ID, nil, "line4-item1", "line4-item2", "line4-item3") wasFileAltered, err := creator.RunCommandOnceWithFile(testCommandString) require.Error(t, err) require.True(t, wasFileAltered) fileString := creator.ToString() - assert.Equal(t, "line2-item1 line2-item2 line2-item3\n", fileString) + assert.Equal(t, "line3-item1 line3-item2 line3-item3\n", fileString) } -func TestHandleLineErrorForSkipLine(t *testing.T) { +func TestHandleLineErrorForContinue(t *testing.T) { fakeErrorCommand := testutils.TestCmd{ Cmd: []string{testCommandString}, Stdout: "failure on line 2: match-pattern do something please", @@ -136,19 +179,19 @@ func TestHandleLineErrorForSkipLine(t *testing.T) { errorHandlers := []*LineErrorHandler{ { Definition: NewErrorDefinition("match-pattern"), - Method: SkipLine, - Reason: "error requiring us to skip this line", - Callback: func() { log.Logf("skip line callback") }, + Method: Continue, + Callback: func() { log.Logf("'continue' callback") }, }, } creator.AddLine("", nil, "line1-item1", "line1-item2", "line1-item3") creator.AddLine("", errorHandlers, "line2-item1", "line2-item2", "line2-item3") creator.AddLine("", nil, "line3-item1", "line3-item2", "line3-item3") + creator.AddLine("", errorHandlers, "line4-item1", "line4-item2", "line4-item3") wasFileAltered, err := creator.RunCommandOnceWithFile(testCommandString) require.Error(t, err) require.True(t, wasFileAltered) fileString := creator.ToString() - assert.Equal(t, "line1-item1 line1-item2 
line1-item3\nline3-item1 line3-item2 line3-item3\n", fileString) + assert.Equal(t, "line3-item1 line3-item2 line3-item3\nline4-item1 line4-item2 line4-item3\n", fileString) } func TestHandleLineErrorNoMatch(t *testing.T) { @@ -162,8 +205,7 @@ func TestHandleLineErrorNoMatch(t *testing.T) { errorHandlers := []*LineErrorHandler{ { Definition: NewErrorDefinition("abc"), - Method: AbortSection, - Reason: "", + Method: ContinueAndAbortSection, Callback: func() {}, }, } @@ -178,10 +220,14 @@ func TestHandleLineErrorNoMatch(t *testing.T) { require.Equal(t, fileStringBefore, fileStringAfter) } +func TestAlwaysMatchDefinition(t *testing.T) { + require.True(t, AlwaysMatchDefinition.isMatch("123456789asdfghjklxcvbnm, jklfdsa7")) +} + func TestGetErrorLineNumber(t *testing.T) { type args struct { - lineFailurePatterns []string - stdErr string + lineFailurePattern string + stdErr string } tests := []struct { @@ -192,7 +238,7 @@ func TestGetErrorLineNumber(t *testing.T) { { "pattern that doesn't match", args{ - []string{"abc"}, + "abc", "xyz", }, -1, @@ -200,7 +246,7 @@ func TestGetErrorLineNumber(t *testing.T) { { "matching pattern with no group", args{ - []string{"abc"}, + "abc", "abc", }, -1, @@ -208,7 +254,7 @@ func TestGetErrorLineNumber(t *testing.T) { { "matching pattern with non-numeric group", args{ - []string{"(abc)"}, + "(abc)", "abc", }, -1, @@ -216,7 +262,7 @@ func TestGetErrorLineNumber(t *testing.T) { { "stderr gives an out-of-bounds line number", args{ - []string{"line (\\d+)"}, + "line (\\d+)", "line 777", }, -1, @@ -224,17 +270,7 @@ func TestGetErrorLineNumber(t *testing.T) { { "good line match", args{ - []string{"line (\\d+)"}, - `there was a failure - on line 11 where the failure happened - fix it please`, - }, - 11, - }, - { - "good line match with other pattern that doesn't match", - args{ - []string{"abc", "line (\\d+)"}, + "line (\\d+)", `there was a failure on line 11 where the failure happened fix it please`, @@ -245,15 +281,11 @@ func 
TestGetErrorLineNumber(t *testing.T) { commandString := "test command" for _, tt := range tests { - lineFailurePatterns := tt.args.lineFailurePatterns + lineFailureDefinition := NewErrorDefinition(tt.args.lineFailurePattern) expectedLineNum := tt.expectedLineNum stdErr := tt.args.stdErr t.Run(tt.name, func(t *testing.T) { - creator := NewFileCreator(common.NewMockIOShim(nil), 2, lineFailurePatterns...) - for i := 0; i < 15; i++ { - creator.AddLine("", nil, fmt.Sprintf("line%d", i)) - } - lineNum := creator.getErrorLineNumber(commandString, stdErr) + lineNum := lineFailureDefinition.getErrorLineNumber(stdErr, commandString, 15) require.Equal(t, expectedLineNum, lineNum) }) } diff --git a/npm/pkg/dataplane/ipsets/ipset.go b/npm/pkg/dataplane/ipsets/ipset.go index f1f509b0a3..8721102ab9 100644 --- a/npm/pkg/dataplane/ipsets/ipset.go +++ b/npm/pkg/dataplane/ipsets/ipset.go @@ -3,7 +3,6 @@ package ipsets import ( "errors" "fmt" - "reflect" "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/npm/util" @@ -272,39 +271,6 @@ func (set *IPSet) ShallowCompare(newSet *IPSet) bool { return true } -// Compare checks if two ipsets are same -func (set *IPSet) Compare(newSet *IPSet) bool { - if set.Name != newSet.Name { - return false - } - if set.Kind != newSet.Kind { - return false - } - if set.Type != newSet.Type { - return false - } - if set.Kind == HashSet { - if len(set.IPPodKey) != len(newSet.IPPodKey) { - return false - } - for podIP := range set.IPPodKey { - if _, ok := newSet.IPPodKey[podIP]; !ok { - return false - } - } - } else { - if len(set.MemberIPSets) != len(newSet.MemberIPSets) { - return false - } - for _, memberSet := range set.MemberIPSets { - if _, ok := newSet.MemberIPSets[memberSet.HashedName]; !ok { - return false - } - } - } - return true -} - func (set *IPSet) incIPSetReferCount() { set.ipsetReferCount++ } @@ -403,7 +369,3 @@ func (set *IPSet) canSetBeSelectorIPSet() bool { set.Type == Namespace || set.Type == 
NestedLabelOfPod) } - -func (ipset *TranslatedIPSet) Equals(otherIPSet *TranslatedIPSet) bool { - return reflect.DeepEqual(ipset, otherIPSet) -} diff --git a/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go b/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go index 1e553ef398..c145d9dea5 100644 --- a/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go +++ b/npm/pkg/dataplane/ipsets/ipsetmanager_linux.go @@ -2,21 +2,59 @@ package ipsets import ( "fmt" + "regexp" - "github.com/Azure/azure-container-networking/log" "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ioutil" + "github.com/Azure/azure-container-networking/npm/pkg/dataplane/parse" "github.com/Azure/azure-container-networking/npm/util" + npmerrors "github.com/Azure/azure-container-networking/npm/util/errors" + "k8s.io/klog" ) const ( - maxTryCount = 1 - deletionPrefix = "delete" - creationPrefix = "create" + azureNPMPrefix = "azure-npm-" + + ipsetCommand = "ipset" + ipsetSaveFlag = "save" + ipsetRestoreFlag = "restore" + ipsetCreateFlag = "-N" + ipsetFlushFlag = "-F" + ipsetAddFlag = "-A" + ipsetDeleteFlag = "-D" + ipsetDestroyFlag = "-X" + ipsetExistFlag = "--exist" + ipsetNetHashFlag = "nethash" + ipsetSetListFlag = "setlist" + ipsetIPPortHashFlag = "hash:ip,port" + ipsetMaxelemName = "maxelem" + ipsetMaxelemNum = "4294967295" + + // constants for parsing ipset save + ipsetAddString = "add" + ipsetCreateString = "create" + ipsetSetListString = "list:set" + ipsetNetHashString = "hash:net" + ipsetIPPortHashString = ipsetIPPortHashFlag + lengthForSuccessfulMatch = 2 + + // creator constants + maxTryCount = 3 + destroySectionPrefix = "delete" + addOrUpdateSectionPrefix = "add/update" ipsetRestoreLineFailurePattern = "Error in line (\\d+):" - setAlreadyExistsPattern = "Set cannot be created: set with the same name already exists" - setDoesntExistPattern = "The set with the given name does not exist" - setInUseByKernelPattern = "Set cannot be destroyed: it is in use by a kernel component" - 
memberSetDoesntExist = "Set to be added/deleted/tested as element does not exist" +) + +var ( + // creator variables + setDoesntExistDefinition = ioutil.NewErrorDefinition("The set with the given name does not exist") + setInUseByKernelDefinition = ioutil.NewErrorDefinition("Set cannot be destroyed: it is in use by a kernel component") + setAlreadyExistsDefinition = ioutil.NewErrorDefinition("Set cannot be created: set with the same name already exists") + memberSetDoesntExistDefinition = ioutil.NewErrorDefinition("Set to be added/deleted/tested as element does not exist") + + // variables for parsing ipset save + hashedNamePattern = fmt.Sprintf(`%s\d+`, azureNPMPrefix) // match azure-npm- + nameForCreateRegex = regexp.MustCompile(fmt.Sprintf("%s (%s) ", ipsetCreateString, hashedNamePattern)) + nameForAddRegex = regexp.MustCompile(fmt.Sprintf("%s (%s) ", ipsetAddString, hashedNamePattern)) ) func (iMgr *IPSetManager) resetIPSets() error { @@ -29,153 +67,414 @@ func (iMgr *IPSetManager) resetIPSets() error { return nil } -// don't need networkID +/* +overall error handling for ipset restore file. +ipset restore will apply all lines to the kernel before a failure, so when recovering from a line failure, we must skip the lines that were already applied. 
+below, "set" refers to either hashset or list, except in the sections for adding to (hash)set and adding to list + +for flush/delete: +- abort the flush and delete calls if flush doesn't work + - checks if set doesn't exist, but performs the same handling for any error +- skip the delete if it fails, and mark it as a failure (TODO) + - checks if the set is in use by kernel component, but performs the same handling for any error + +for create: +- abort create and add/delete calls if create doesn't work + - checks if the set/list already exists, but performs the same handling for any error + +for add to set: +- skip add if it fails + +for add to list: +- skip the add if it fails, and mark it as a failure (TODO) + - checks if the member set can't be added to a list because it doesn't exist, but performs the same handling for any error + +for delete: +- skip the delete if it fails for any reason + +overall format for ipset restore file: + [flushes] (random order) + [destroys] (random order) + [creates] (random order) + [deletes and adds for sets already in the kernel] (in order of occurrence in save file, deletes first (in random order), then adds (in random order)) + [adds for new sets] (random order for sets and members) + +example where every set in add/update cache should have ip 1.2.3.4 and 2.3.4.5: + save file showing current kernel state: + create set-in-kernel-1 net:hash ... + add set-in-kernel-1 1.2.3.4 + add set-in-kernel-1 8.8.8.8 + add set-in-kernel-1 9.9.9.9 + create set-in-kernel-2 net:hash ... 
+ add set-in-kernel-1 3.3.3.3 + + restore file: [flag meanings: -F (flush), -X (destroy), -N (create), -D (delete), -A (add)] + -F set-to-delete2 + -F set-to-delete3 + -F set-to-delete1 + -X set-to-delete2 + -X set-to-delete3 + -X set-to-delete1 + -N new-set-2 + -N set-in-kernel-2 + -N set-in-kernel-1 + -N new-set-1 + -N new-set-3 + -D set-in-kernel-1 8.8.8.8 + -D set-in-kernel-1 9.9.9.9 + -A set-in-kernel-1 2.3.4.5 + -D set-in-kernel-2 3.3.3.3 + -A set-in-kernel-2 2.3.4.5 + -A set-in-kernel-2 1.2.3.4 + -A new-set-2 1.2.3.4 + -A new-set-2 2.3.4.5 + -A new-set-1 2.3.4.5 + -A new-set-1 1.2.3.4 + -A new-set-3 1.2.3.4 + -A new-set-3 2.3.4.5 + +*/ + func (iMgr *IPSetManager) applyIPSets() error { - toDeleteSetNames := convertAndDeleteCache(iMgr.toDeleteCache) - toAddOrUpdateSetNames := convertAndDeleteCache(iMgr.toAddOrUpdateCache) - creator := iMgr.getFileCreator(maxTryCount, toDeleteSetNames, toAddOrUpdateSetNames) - err := creator.RunCommandWithFile(util.Ipset, util.IpsetRestoreFlag) - if err != nil { - return fmt.Errorf("%w", err) + var saveFile []byte + var saveError error + if len(iMgr.toAddOrUpdateCache) > 0 { + saveFile, saveError = iMgr.ipsetSave() + if saveError != nil { + return npmerrors.SimpleErrorWrapper("ipset save failed when applying ipsets", saveError) + } + } + creator := iMgr.fileCreator(maxTryCount, saveFile) + restoreError := creator.RunCommandWithFile(ipsetCommand, ipsetRestoreFlag) + if restoreError != nil { + return npmerrors.SimpleErrorWrapper("ipset restore failed when applying ipsets", restoreError) } return nil } -func convertAndDeleteCache(cache map[string]struct{}) []string { - result := make([]string, len(cache)) - i := 0 - for setName := range cache { - result[i] = setName - delete(cache, setName) - i++ +func (iMgr *IPSetManager) ipsetSave() ([]byte, error) { + command := iMgr.ioShim.Exec.Command(ipsetCommand, ipsetSaveFlag) + grepCommand := iMgr.ioShim.Exec.Command(ioutil.Grep, azureNPMPrefix) + searchResults, gotMatches, err := 
ioutil.PipeCommandToGrep(command, grepCommand) + if err != nil { + return nil, npmerrors.SimpleErrorWrapper("failed to run ipset save", err) + } + if !gotMatches { + return nil, nil } - return result + return searchResults, nil } -// getFileCreator encodes an ipset restore file with error handling. -// We use slices instead of maps so we can have determinstic behavior for -// unit tests on the file creator i.e. check file contents before and after error handling. -// Without slices, we could do unit tests on certain segments of the file, -// but things would get complicated for checking error handling. -// We can't escape the nondeterministic behavior of adding members, -// but we can handle this in UTs with sorting. -func (iMgr *IPSetManager) getFileCreator(maxTryCount int, toDeleteSetNames, toAddOrUpdateSetNames []string) *ioutil.FileCreator { - creator := ioutil.NewFileCreator(iMgr.ioShim, maxTryCount, ipsetRestoreLineFailurePattern) - // creator.AddErrorToRetryOn(ioutil.NewErrorDefinition("something")) // TODO add file-level errors? 
- iMgr.handleDeletions(creator, toDeleteSetNames) - iMgr.handleAddOrUpdates(creator, toAddOrUpdateSetNames) - return creator -} +func (iMgr *IPSetManager) fileCreator(maxTryCount int, saveFile []byte) *ioutil.FileCreator { + creator := ioutil.NewFileCreator(iMgr.ioShim, maxTryCount, ipsetRestoreLineFailurePattern) // TODO make the line failure pattern into a definition constant eventually -func (iMgr *IPSetManager) handleDeletions(creator *ioutil.FileCreator, setNames []string) { - // flush all first so we don't try to delete an ipset referenced by a list we're deleting too - // error handling: - // - abort the flush and delete call for a set if the set doesn't exist - // - if the set is in use by a kernel component, then skip the delete and mark it as a failure - for _, setName := range setNames { - setName := setName // to appease golint complaints about function literal - errorHandlers := []*ioutil.LineErrorHandler{ - { - Definition: ioutil.NewErrorDefinition(setDoesntExistPattern), - Method: ioutil.AbortSection, - Callback: func() { - // no action needed since we expect that it's gone after applyIPSets() - log.Logf("was going to delete set %s but it doesn't exist", setName) - }, - }, - } - sectionID := getSectionID(deletionPrefix, setName) - hashedSetName := util.GetHashedName(setName) - creator.AddLine(sectionID, errorHandlers, util.IpsetFlushFlag, hashedSetName) // flush set + // flush all sets first so we don't try to delete an ipset referenced by a list we're deleting too + for prefixedName := range iMgr.toDeleteCache { + iMgr.flushSetInFile(creator, prefixedName) + } + for prefixedName := range iMgr.toDeleteCache { + iMgr.destroySetInFile(creator, prefixedName) } - for _, setName := range setNames { - setName := setName // to appease golint complaints about function literal - errorHandlers := []*ioutil.LineErrorHandler{ - { - Definition: ioutil.NewErrorDefinition(setInUseByKernelPattern), - Method: ioutil.SkipLine, - Callback: func() { - log.Errorf("was 
going to delete set %s but it is in use by a kernel component", setName) - // TODO mark the set as a failure and reconcile what iptables rule or ipset is referring to it - }, - }, + // create all sets first so we don't try to add a member set to a list if it hasn't been created yet + for prefixedName := range iMgr.toAddOrUpdateCache { + set := iMgr.setMap[prefixedName] + iMgr.createSetInFile(creator, set) + } + + // for dirty sets already in the kernel, update members (add members not in the kernel, and delete undesired members in the kernel) + iMgr.updateDirtyKernelSets(saveFile, creator) + + // for the remaining dirty sets, add their members to the kernel + for prefixedName := range iMgr.toAddOrUpdateCache { + set := iMgr.setMap[prefixedName] + sectionID := sectionID(addOrUpdateSectionPrefix, prefixedName) + if set.Kind == HashSet { + for ip := range set.IPPodKey { + iMgr.addMemberInFile(creator, set, sectionID, ip) + } + } else { + for _, member := range set.MemberIPSets { + iMgr.addMemberInFile(creator, set, sectionID, member.HashedName) + } } - sectionID := getSectionID(deletionPrefix, setName) - hashedSetName := util.GetHashedName(setName) - creator.AddLine(sectionID, errorHandlers, util.IpsetDestroyFlag, hashedSetName) // destroy set } + return creator } -func (iMgr *IPSetManager) handleAddOrUpdates(creator *ioutil.FileCreator, setNames []string) { - // create all sets first - // error handling: - // - abort the create, flush, and add calls if create doesn't work - // Won't abort adding the set to a list. Will need another retry to handle that - // TODO change this behavior? 
- for _, setName := range setNames { - set := iMgr.setMap[setName] - - methodFlag := util.IpsetNetHashFlag - if set.Kind == ListSet { - methodFlag = util.IpsetSetListFlag - } else if set.Type == NamedPorts { - methodFlag = util.IpsetIPPortHashFlag - } +// updates the creator (adds/deletes members) for dirty sets already in the kernel +// updates the toAddOrUpdateCache: after calling this function, the cache will only consist of sets to create +// error handling principal: +// - if contract with ipset save (or grep) is breaking, salvage what we can, take a snapshot without grep, and log the failure +// - have a background process for sending/removing snapshots intermittently +func (iMgr *IPSetManager) updateDirtyKernelSets(saveFile []byte, creator *ioutil.FileCreator) { + toAddOrUpdateHashedNames := make(map[string]string) // map hashed names to prefixed names + for prefixedName := range iMgr.toAddOrUpdateCache { + hashedName := iMgr.setMap[prefixedName].HashedName + toAddOrUpdateHashedNames[hashedName] = prefixedName + } - specs := []string{util.IpsetCreationFlag, set.HashedName, util.IpsetExistFlag, methodFlag} - if set.Type == CIDRBlocks { - specs = append(specs, util.IpsetMaxelemName, util.IpsetMaxelemNum) + klog.Infof("beginning to parse ipset save file:\nBEGIN-IPSET-SAVE-FILE-FOR-APPLY-IPSETS\n%s\nEND-IPSET-SAVE-FILE-FOR-APPLY-IPSETS", string(saveFile)) // TODO remove eventually + + // each iteration reads a create line and any ensuing add lines + readIndex := 0 + var line []byte + if readIndex < len(saveFile) { + line, readIndex = parse.Line(readIndex, saveFile) + } + for readIndex < len(saveFile) { + createMatches := nameForCreateRegex.FindSubmatch(line) + if len(createMatches) != lengthForSuccessfulMatch { + klog.Errorf("expected a create line with an azure-npm set in ipset save file, but got the following line: %s", string(line)) + // TODO send error snapshot + line, readIndex = nextCreateLine(readIndex, saveFile) + continue } - setName := setName // to 
appease golint complaints about function literal - errorHandlers := []*ioutil.LineErrorHandler{ - { - Definition: ioutil.NewErrorDefinition(setAlreadyExistsPattern), - Method: ioutil.AbortSection, - Callback: func() { - log.Errorf("was going to add/update set %s but couldn't create the set", setName) - // TODO mark the set as a failure and handle this - }, - }, + hashedName := string(createMatches[1]) + prefixedName, shouldModify := toAddOrUpdateHashedNames[hashedName] + if !shouldModify { + line, readIndex = nextCreateLine(readIndex, saveFile) + continue } - sectionID := getSectionID(creationPrefix, setName) - creator.AddLine(sectionID, errorHandlers, specs...) // create set - } - // flush and add all IPs/members for each set - // error handling: - // - if a member set can't be added to a list because it doesn't exist, then skip the add and mark it as a failure - for _, setName := range setNames { - set := iMgr.setMap[setName] - sectionID := getSectionID(creationPrefix, setName) - creator.AddLine(sectionID, nil, util.IpsetFlushFlag, set.HashedName) // flush set (no error handler needed) + // update the set from the kernel + set := iMgr.setMap[prefixedName] + delete(iMgr.toAddOrUpdateCache, prefixedName) // remove from the dirty cache so we don't add it later + // not necessary, but improves performance? The TestUpdateWithBadSaveFile UT currently relies on this too (ignore create for set that we saw a create for earlier) + delete(toAddOrUpdateHashedNames, hashedName) + + // check for consistent type + restOfLine := line[len(createMatches[0]):] + if haveTypeProblem(set, restOfLine) { + // error logging happens in the helper function + // TODO send error snapshot + line, readIndex = nextCreateLine(readIndex, saveFile) + continue + } + // get desired members from cache + var membersToAdd map[string]struct{} if set.Kind == HashSet { + membersToAdd = make(map[string]struct{}, len(set.IPPodKey)) for ip := range set.IPPodKey { - // TODO add error handler? 
- creator.AddLine(sectionID, nil, util.IpsetAppendFlag, set.HashedName, ip) // add IP + membersToAdd[ip] = struct{}{} } } else { - setName := setName // to appease golint complaints about function literal + membersToAdd = make(map[string]struct{}, len(set.IPPodKey)) for _, member := range set.MemberIPSets { - memberName := member.Name // to appease golint complaints about function literal - errorHandlers := []*ioutil.LineErrorHandler{ - { - Definition: ioutil.NewErrorDefinition(memberSetDoesntExist), - Method: ioutil.SkipLine, - Callback: func() { - log.Errorf("was going to add member set %s to list %s, but the member doesn't exist", memberName, setName) - // TODO handle error - }, - }, - } - creator.AddLine(sectionID, errorHandlers, util.IpsetAppendFlag, set.HashedName, member.HashedName) // add member + membersToAdd[member.HashedName] = struct{}{} + } + } + + // determine which members to add/delete + membersToDelete := make(map[string]struct{}) + for readIndex < len(saveFile) { + line, readIndex = parse.Line(readIndex, saveFile) + if hasPrefix(line, ipsetCreateString) { + break } + addMatches := nameForAddRegex.FindSubmatch(line) + if len(addMatches) != lengthForSuccessfulMatch || string(addMatches[1]) != hashedName { + klog.Errorf("expected an add line for set %s in ipset save file, but got the following line: %s", hashedName, string(line)) + // TODO send error snapshot + line, readIndex = nextCreateLine(readIndex, saveFile) + break + } + restOfLine = line[len(addMatches[0]):] + member := string(restOfLine) + _, shouldKeep := membersToAdd[member] + if shouldKeep { + delete(membersToAdd, member) // member already in the kernel, so don't add it later + } else { + membersToDelete[member] = struct{}{} // member should be deleted from the kernel + } + } + + // delete undesired members from restore file + sectionID := sectionID(addOrUpdateSectionPrefix, prefixedName) + for member := range membersToDelete { + iMgr.deleteMemberInFile(creator, set, sectionID, member) + 
} + // add new members to restore file + for member := range membersToAdd { + iMgr.addMemberInFile(creator, set, sectionID, member) + } + } +} + +func nextCreateLine(originalReadIndex int, saveFile []byte) (createLine []byte, nextReadIndex int) { + nextReadIndex = originalReadIndex + for nextReadIndex < len(saveFile) { + createLine, nextReadIndex = parse.Line(nextReadIndex, saveFile) + createMatches := nameForCreateRegex.FindSubmatch(createLine) + if len(createMatches) == lengthForSuccessfulMatch { + return + } + } + return +} + +func haveTypeProblem(set *IPSet, restOfCreateLine []byte) bool { + // TODO check type based on maxelem for hash sets? CIDR blocks have a different maxelem + switch { + case hasPrefix(restOfCreateLine, ipsetSetListString): + if set.Kind != ListSet { + lineString := fmt.Sprintf("create %s %s", set.HashedName, string(restOfCreateLine)) // reconstruct the line for log + klog.Errorf("expected to find a ListSet but have the line: %s", lineString) + return true + } + case hasPrefix(restOfCreateLine, ipsetNetHashString): + if set.Kind != HashSet || set.Type == NamedPorts { + lineString := fmt.Sprintf("create %s %s", set.HashedName, string(restOfCreateLine)) // reconstruct the line for log + klog.Errorf("expected to find a non-NamedPorts HashSet but have the following line: %s", lineString) + return true + } + case hasPrefix(restOfCreateLine, ipsetIPPortHashString): + if set.Type != NamedPorts { + lineString := fmt.Sprintf("create %s %s", set.HashedName, string(restOfCreateLine)) // reconstruct the line for log + klog.Errorf("expected to find a NamedPorts set but have the following line: %s", lineString) + return true + } + } + return false +} + +func hasPrefix(line []byte, prefix string) bool { + return len(line) >= len(prefix) && string(line[:len(prefix)]) == prefix +} + +func (iMgr *IPSetManager) flushSetInFile(creator *ioutil.FileCreator, prefixedName string) { + errorHandlers := []*ioutil.LineErrorHandler{ + { + Definition: 
setDoesntExistDefinition, + Method: ioutil.ContinueAndAbortSection, + Callback: func() { + // no action needed + klog.Infof("skipping flush and upcoming delete for set %s since the set doesn't exist", prefixedName) + }, + }, + { + Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.ContinueAndAbortSection, + Callback: func() { + klog.Errorf("skipping flush and upcoming delete for set %s due to unknown error", prefixedName) + // TODO mark as a failure + // would this ever happen? + }, + }, + } + sectionID := sectionID(destroySectionPrefix, prefixedName) + hashedName := util.GetHashedName(prefixedName) + creator.AddLine(sectionID, errorHandlers, ipsetFlushFlag, hashedName) +} + +func (iMgr *IPSetManager) destroySetInFile(creator *ioutil.FileCreator, prefixedName string) { + errorHandlers := []*ioutil.LineErrorHandler{ + { + Definition: setInUseByKernelDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping destroy line for set %s since the set is in use by a kernel component", prefixedName) + // TODO mark the set as a failure and reconcile what iptables rule or ipset is referring to it + }, + }, + { + Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping destroy line for set %s due to unknown error", prefixedName) + }, + }, + } + sectionID := sectionID(destroySectionPrefix, prefixedName) + hashedName := util.GetHashedName(prefixedName) + creator.AddLine(sectionID, errorHandlers, ipsetDestroyFlag, hashedName) // destroy set +} + +func (iMgr *IPSetManager) createSetInFile(creator *ioutil.FileCreator, set *IPSet) { + methodFlag := ipsetNetHashFlag + if set.Kind == ListSet { + methodFlag = ipsetSetListFlag + } else if set.Type == NamedPorts { + methodFlag = ipsetIPPortHashFlag + } + + specs := []string{ipsetCreateFlag, set.HashedName, ipsetExistFlag, methodFlag} + if set.Type == CIDRBlocks { + specs = append(specs, ipsetMaxelemName, ipsetMaxelemNum) + } + + prefixedName := 
set.Name // to appease golint complaints about function literal + errorHandlers := []*ioutil.LineErrorHandler{ + { + Definition: setAlreadyExistsDefinition, + Method: ioutil.ContinueAndAbortSection, + Callback: func() { + klog.Errorf("skipping create and any following adds/deletes for set %s since the set already exists with different specs", prefixedName) + // TODO mark the set as a failure and handle this + }, + }, + { + Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.ContinueAndAbortSection, + Callback: func() { + klog.Errorf("skipping create and any following adds/deletes for set %s due to unknown error", prefixedName) + // TODO same as above error handler + }, + }, + } + sectionID := sectionID(addOrUpdateSectionPrefix, prefixedName) + creator.AddLine(sectionID, errorHandlers, specs...) // create set +} + +func (iMgr *IPSetManager) deleteMemberInFile(creator *ioutil.FileCreator, set *IPSet, sectionID, member string) { + errorHandlers := []*ioutil.LineErrorHandler{ + { + Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping delete line for set %s due to unknown error", set.Name) + }, + }, + } + creator.AddLine(sectionID, errorHandlers, ipsetDeleteFlag, set.HashedName, member) +} + +func (iMgr *IPSetManager) addMemberInFile(creator *ioutil.FileCreator, set *IPSet, sectionID, member string) { + var errorHandlers []*ioutil.LineErrorHandler + if set.Kind == ListSet { + errorHandlers = []*ioutil.LineErrorHandler{ + { + Definition: memberSetDoesntExistDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping add of %s to list %s since the member doesn't exist", member, set.Name) + // TODO reconcile + }, + }, + { + Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping add of %s to list %s due to unknown error", member, set.Name) + }, + }, + } + } else { + errorHandlers = []*ioutil.LineErrorHandler{ + { + 
Definition: ioutil.AlwaysMatchDefinition, + Method: ioutil.Continue, + Callback: func() { + klog.Errorf("skipping add line for hash set %s due to unknown error", set.Name) + }, + }, } } + creator.AddLine(sectionID, errorHandlers, ipsetAddFlag, set.HashedName, member) } -func getSectionID(prefix, setName string) string { - return fmt.Sprintf("%s-%s", prefix, setName) +func sectionID(prefix, prefixedName string) string { + return fmt.Sprintf("%s-%s", prefix, prefixedName) } diff --git a/npm/pkg/dataplane/ipsets/ipsetmanager_linux_test.go b/npm/pkg/dataplane/ipsets/ipsetmanager_linux_test.go index dcdb92778e..8125ed4cb1 100644 --- a/npm/pkg/dataplane/ipsets/ipsetmanager_linux_test.go +++ b/npm/pkg/dataplane/ipsets/ipsetmanager_linux_test.go @@ -2,398 +2,914 @@ package ipsets import ( "fmt" + "regexp" "sort" "strings" "testing" "github.com/Azure/azure-container-networking/common" - "github.com/Azure/azure-container-networking/npm/pkg/dataplane/ioutil" dptestutils "github.com/Azure/azure-container-networking/npm/pkg/dataplane/testutils" testutils "github.com/Azure/azure-container-networking/test/utils" "github.com/stretchr/testify/require" ) -var ( - iMgrApplyAllCfg = &IPSetManagerCfg{ - IPSetMode: ApplyAllIPSets, - NetworkName: "", - } +const saveResult = "create test-list1 list:set size 8\nadd test-list1 test-list2" - ipsetRestoreStringSlice = []string{"ipset", "restore"} -) +var iMgrApplyAllCfg = &IPSetManagerCfg{ + IPSetMode: ApplyAllIPSets, + NetworkName: "", +} + +// TODO test that a reconcile list is updated for all the TestFailure UTs +// TODO same exact TestFailure UTs for unknown errors func TestDestroyNPMIPSets(t *testing.T) { - calls := []testutils.TestCmd{} // TODO - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + // TODO + calls := []testutils.TestCmd{} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + require.NoError(t, iMgr.resetIPSets()) } -func 
TestConvertAndDeleteCache(t *testing.T) { - cache := map[string]struct{}{ - "a": {}, - "b": {}, - "c": {}, - "d": {}, - } - slice := convertAndDeleteCache(cache) - require.Equal(t, 0, len(cache)) - require.Equal(t, 4, len(slice)) - for _, item := range []string{"a", "b", "c", "d"} { - success := false - for _, sliceItem := range slice { - if item == sliceItem { - success = true - } - } - if !success { - require.FailNowf(t, "%s not in the slice", item) - } +func TestApplyIPSetsSuccessWithoutSave(t *testing.T) { + // no sets to add/update, so don't call ipset save + calls := []testutils.TestCmd{{Cmd: ipsetRestoreStringSlice}} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + // delete a set so the file isn't empty (otherwise the creator won't even call the exec command) + iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestNSSet.PrefixName) + err := iMgr.applyIPSets() + require.NoError(t, err) +} + +func TestApplyIPSetsSuccessWithSave(t *testing.T) { + // no sets to add/update, so don't call ipset save + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, PipedToCommand: true}, + {Cmd: []string{"grep", "azure-npm-"}}, + {Cmd: ipsetRestoreStringSlice}, + } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + // create a set so we run ipset save + iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) + err := iMgr.applyIPSets() + require.NoError(t, err) +} + +func TestApplyIPSetsFailureOnSave(t *testing.T) { + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, HasStartError: true, PipedToCommand: true, ExitCode: 1}, + {Cmd: []string{"grep", "azure-npm-"}}, } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + // create a set so we run ipset save + 
iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) + err := iMgr.applyIPSets() + require.Error(t, err) } -// create all possible SetTypes -func TestApplyCreationsAndAdds(t *testing.T) { - calls := []testutils.TestCmd{fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) +func TestApplyIPSetsFailureOnRestore(t *testing.T) { + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, PipedToCommand: true}, + {Cmd: []string{"grep", "azure-npm-"}}, + // fail 3 times because this is our max try count + {Cmd: ipsetRestoreStringSlice, ExitCode: 1}, + {Cmd: ipsetRestoreStringSlice, ExitCode: 1}, + {Cmd: ipsetRestoreStringSlice, ExitCode: 1}, + } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) - lines := []string{ - fmt.Sprintf("-N %s -exist nethash", TestNSSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestKeyPodSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestKVPodSet.HashedName), - fmt.Sprintf("-N %s -exist hash:ip,port", TestNamedportSet.HashedName), - fmt.Sprintf("-N %s -exist nethash maxelem 4294967295", TestCIDRSet.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestKeyNSList.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestKVNSList.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestNestedLabelList.HashedName), - } - lines = append(lines, getSortedLines(TestNSSet, "10.0.0.0", "10.0.0.1")...) - lines = append(lines, getSortedLines(TestKeyPodSet, "10.0.0.5")...) - lines = append(lines, getSortedLines(TestKVPodSet)...) - lines = append(lines, getSortedLines(TestNamedportSet)...) - lines = append(lines, getSortedLines(TestCIDRSet)...) - lines = append(lines, getSortedLines(TestKeyNSList, TestNSSet.HashedName, TestKeyPodSet.HashedName)...) - lines = append(lines, getSortedLines(TestKVNSList, TestKVPodSet.HashedName)...) - lines = append(lines, getSortedLines(TestNestedLabelList)...) 
- expectedFileString := strings.Join(lines, "\n") + "\n" + // create a set so we run ipset save + iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) + err := iMgr.applyIPSets() + require.Error(t, err) +} + +func TestApplyIPSetsRecoveryForFailureOnRestore(t *testing.T) { + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, PipedToCommand: true}, + {Cmd: []string{"grep", "azure-npm-"}}, + {Cmd: ipsetRestoreStringSlice, ExitCode: 1}, + {Cmd: ipsetRestoreStringSlice}, + } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + // create a set so we run ipset save iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) + err := iMgr.applyIPSets() + require.NoError(t, err) +} + +func TestIPSetSave(t *testing.T) { + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, PipedToCommand: true}, + {Cmd: []string{"grep", "azure-npm-"}, Stdout: saveResult}, + } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + output, err := iMgr.ipsetSave() + require.NoError(t, err) + require.Equal(t, saveResult, string(output)) +} + +func TestIPSetSaveNoMatch(t *testing.T) { + calls := []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, ExitCode: 1}, + {Cmd: []string{"grep", "azure-npm-"}}, + } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + output, err := iMgr.ipsetSave() + require.NoError(t, err) + require.Nil(t, output) +} + +func TestCreateForAllSetTypes(t *testing.T) { + // without save file + calls := []testutils.TestCmd{fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) require.NoError(t, 
iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyPodSet.Metadata}) require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "10.0.0.5", "c")) iMgr.CreateIPSets([]*IPSetMetadata{TestKVPodSet.Metadata}) iMgr.CreateIPSets([]*IPSetMetadata{TestNamedportSet.Metadata}) iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyNSList.Metadata}) require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata, TestKeyPodSet.Metadata})) - iMgr.CreateIPSets([]*IPSetMetadata{TestKVNSList.Metadata}) require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKVNSList.Metadata}, []*IPSetMetadata{TestKVPodSet.Metadata})) iMgr.CreateIPSets([]*IPSetMetadata{TestNestedLabelList.Metadata}) - toAddOrUpdateSetNames := []string{ - TestNSSet.PrefixName, - TestKeyPodSet.PrefixName, - TestKVPodSet.PrefixName, - TestNamedportSet.PrefixName, - TestCIDRSet.PrefixName, - TestKeyNSList.PrefixName, - TestKVNSList.PrefixName, - TestNestedLabelList.PrefixName, - } - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - - creator := iMgr.getFileCreator(1, nil, toAddOrUpdateSetNames) - actualFileString := getSortedFileString(creator) - - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + + creator := iMgr.fileCreator(len(calls), nil) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + + lines := []string{ + fmt.Sprintf("-N %s --exist nethash", TestNSSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKVPodSet.HashedName), + fmt.Sprintf("-N %s --exist hash:ip,port", TestNamedportSet.HashedName), + fmt.Sprintf("-N %s --exist nethash maxelem 4294967295", TestCIDRSet.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-N %s --exist 
setlist", TestKVNSList.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestNestedLabelList.HashedName), + fmt.Sprintf("-A %s 10.0.0.0", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.1", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.5", TestKeyPodSet.HashedName), + fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName), + fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestKeyPodSet.HashedName), + fmt.Sprintf("-A %s %s", TestKVNSList.HashedName, TestKVPodSet.HashedName), + "", + } + sortedExpectedLines := testAndSortRestoreFileLines(t, lines) + + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.NoError(t, err) - require.False(t, wasFileAltered) + require.NoError(t, err, "ipset restore should be successful") + require.False(t, wasFileAltered, "file should not be altered") } -func TestApplyDeletions(t *testing.T) { +func TestDestroy(t *testing.T) { + // without save file calls := []testutils.TestCmd{fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) - // Remove members and delete others - iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) + // remove some members and destroy some sets require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) + require.NoError(t, iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) iMgr.CreateIPSets([]*IPSetMetadata{TestKeyPodSet.Metadata}) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyNSList.Metadata}) require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata, TestKeyPodSet.Metadata})) - require.NoError(t, 
iMgr.RemoveFromSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) require.NoError(t, iMgr.RemoveFromList(TestKeyNSList.Metadata, []*IPSetMetadata{TestKeyPodSet.Metadata})) - iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestCIDRSet.PrefixName) - iMgr.CreateIPSets([]*IPSetMetadata{TestNestedLabelList.Metadata}) + iMgr.CreateIPSets([]*IPSetMetadata{TestNestedLabelList.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestNestedLabelList.PrefixName) - toDeleteSetNames := []string{TestCIDRSet.PrefixName, TestNestedLabelList.PrefixName} - assertEqualContentsTestHelper(t, toDeleteSetNames, iMgr.toDeleteCache) - toAddOrUpdateSetNames := []string{TestNSSet.PrefixName, TestKeyPodSet.PrefixName, TestKeyNSList.PrefixName} - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - creator := iMgr.getFileCreator(1, toDeleteSetNames, toAddOrUpdateSetNames) - actualFileString := getSortedFileString(creator) + creator := iMgr.fileCreator(len(calls), nil) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) lines := []string{ fmt.Sprintf("-F %s", TestCIDRSet.HashedName), fmt.Sprintf("-F %s", TestNestedLabelList.HashedName), fmt.Sprintf("-X %s", TestCIDRSet.HashedName), fmt.Sprintf("-X %s", TestNestedLabelList.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestNSSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestKeyPodSet.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestNSSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-A %s 10.0.0.0", TestNSSet.HashedName), + fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName), + "", } - lines = append(lines, getSortedLines(TestNSSet, "10.0.0.0")...) 
- lines = append(lines, getSortedLines(TestKeyPodSet)...) - lines = append(lines, getSortedLines(TestKeyNSList, TestNSSet.HashedName)...) - expectedFileString := strings.Join(lines, "\n") + "\n" + sortedExpectedLines := testAndSortRestoreFileLines(t, lines) - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.NoError(t, err) - require.False(t, wasFileAltered) + require.NoError(t, err, "ipset restore should be successful") + require.False(t, wasFileAltered, "file should not be altered") } -// TODO test that a reconcile list is updated -func TestFailureOnCreation(t *testing.T) { - setAlreadyExistsCommand := testutils.TestCmd{ - Cmd: ipsetRestoreStringSlice, - Stdout: "Error in line 3: Set cannot be created: set with the same name already exists", - ExitCode: 1, +func TestUpdateWithIdenticalSaveFile(t *testing.T) { + calls := []testutils.TestCmd{fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.1", TestNSSet.HashedName), + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyPodSet.HashedName), + fmt.Sprintf("add %s 10.0.0.5", TestKeyPodSet.HashedName), + fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestNamedportSet.HashedName), + fmt.Sprintf("create %s list:set size 8", TestKeyNSList.HashedName), + fmt.Sprintf("add %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName), + fmt.Sprintf("add %s %s", TestKeyNSList.HashedName, TestKeyPodSet.HashedName), + fmt.Sprintf("create %s list:set size 8", 
TestKVNSList.HashedName), + fmt.Sprintf("add %s %s", TestKVNSList.HashedName, TestKVPodSet.HashedName), + fmt.Sprintf("create %s list:set size 8", TestNestedLabelList.HashedName), } - calls := []testutils.TestCmd{setAlreadyExistsCommand, fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) - iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyPodSet.Metadata}) require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "10.0.0.5", "c")) - iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) - iMgr.DeleteIPSet(TestCIDRSet.PrefixName) + iMgr.CreateIPSets([]*IPSetMetadata{TestNamedportSet.Metadata}) + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata, TestKeyPodSet.Metadata})) + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKVNSList.Metadata}, []*IPSetMetadata{TestKVPodSet.Metadata})) + iMgr.CreateIPSets([]*IPSetMetadata{TestNestedLabelList.Metadata}) + + creator := iMgr.fileCreator(len(calls), saveFileBytes) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + + lines := []string{ + fmt.Sprintf("-N %s --exist nethash", TestNSSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKVPodSet.HashedName), + fmt.Sprintf("-N %s --exist hash:ip,port", TestNamedportSet.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKVNSList.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestNestedLabelList.HashedName), + "", + } + sortedExpectedLines := 
testAndSortRestoreFileLines(t, lines) - toAddOrUpdateSetNames := []string{TestNSSet.PrefixName, TestKeyPodSet.PrefixName} - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - toDeleteSetNames := []string{TestCIDRSet.PrefixName} - assertEqualContentsTestHelper(t, toDeleteSetNames, iMgr.toDeleteCache) - creator := iMgr.getFileCreator(2, toDeleteSetNames, toAddOrUpdateSetNames) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.Error(t, err) - require.True(t, wasFileAltered) + require.NoError(t, err, "ipset restore should be successful") + require.False(t, wasFileAltered, "file should not be altered") +} + +func TestUpdateWithRealisticSaveFile(t *testing.T) { + // save file doesn't have some sets we're adding and has some sets that: + // - aren't dirty + // - will be deleted + // - have members which we will delete + // - are missing members, which we will add + calls := []testutils.TestCmd{fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), // should add 10.0.0.1-5 to this set + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), // keep this member + fmt.Sprintf("add %s 5.6.7.8", TestNSSet.HashedName), // delete this member + fmt.Sprintf("add %s 5.6.7.9", TestNSSet.HashedName), // delete this member + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyPodSet.HashedName), // dirty but no member changes in the end + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKVPodSet.HashedName), // ignore this set since it's not dirty + fmt.Sprintf("add %s 1.2.3.4", TestKVPodSet.HashedName), // ignore this set since it's not dirty + fmt.Sprintf("create %s 
list:set size 8", TestKeyNSList.HashedName), // should add TestKeyPodSet to this set + fmt.Sprintf("add %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName), // keep this member + fmt.Sprintf("add %s %s", TestKeyNSList.HashedName, TestNamedportSet.HashedName), // delete this member + fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestNamedportSet.HashedName), // ignore this set since it's not dirty + fmt.Sprintf("create %s list:set size 8", TestNestedLabelList.HashedName), // this set will be deleted + } + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "b")) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.2", "c")) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.3", "d")) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.4", "e")) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.5", "f")) + iMgr.CreateIPSets([]*IPSetMetadata{TestKeyPodSet.Metadata}) + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata, TestKeyPodSet.Metadata})) + iMgr.CreateIPSets([]*IPSetMetadata{TestKVNSList.Metadata}) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestCIDRSet.Metadata}, "1.2.3.4", "z")) // set not in save file + iMgr.CreateIPSets([]*IPSetMetadata{TestNestedLabelList.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestNestedLabelList.PrefixName) + + creator := iMgr.fileCreator(len(calls), saveFileBytes) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) // adding NSSet and KeyPodSet (should be keeping NSSet and deleting NamedportSet) lines := []string{ - fmt.Sprintf("-F %s", TestCIDRSet.HashedName), 
- fmt.Sprintf("-X %s", TestCIDRSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-F %s", TestNestedLabelList.HashedName), + fmt.Sprintf("-X %s", TestNestedLabelList.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestNSSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKVNSList.HashedName), + fmt.Sprintf("-N %s --exist nethash maxelem 4294967295", TestCIDRSet.HashedName), + fmt.Sprintf("-A %s 1.2.3.4", TestCIDRSet.HashedName), + fmt.Sprintf("-D %s 5.6.7.8", TestNSSet.HashedName), + fmt.Sprintf("-D %s 5.6.7.9", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.1", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.2", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.3", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.4", TestNSSet.HashedName), + fmt.Sprintf("-A %s 10.0.0.5", TestNSSet.HashedName), + fmt.Sprintf("-D %s %s", TestKeyNSList.HashedName, TestNamedportSet.HashedName), + fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestKeyPodSet.HashedName), + "", } - lines = append(lines, getSortedLines(TestKeyPodSet, "10.0.0.5")...) 
- expectedFileString := strings.Join(lines, "\n") + "\n" + sortedExpectedLines := testAndSortRestoreFileLines(t, lines) - actualFileString := getSortedFileString(creator) - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) + wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") + require.NoError(t, err, "ipset restore should be successful") + require.False(t, wasFileAltered, "file should not be altered") +} + +func TestHaveTypeProblem(t *testing.T) { + testTypeProblem := func(shouldHaveProblem bool, metadata *IPSetMetadata, lineString string) { + set := NewIPSet(metadata) + line := []byte(lineString) + createMatches := nameForCreateRegex.FindSubmatch(line) + require.Equal(t, 2, len(createMatches), "didn't find match for line: %s", string(line)) + restOfLine := line[len(createMatches[0]):] + if shouldHaveProblem { + require.True(t, haveTypeProblem(set, restOfLine)) + } else { + require.False(t, haveTypeProblem(set, restOfLine)) + } + } + testTypeProblem(false, TestNSSet.Metadata, fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName)) + testTypeProblem(true, TestNamedportSet.Metadata, fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNamedportSet.HashedName)) + testTypeProblem(true, TestKeyNSList.Metadata, fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyNSList.HashedName)) + testTypeProblem(false, TestNamedportSet.Metadata, fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestNamedportSet.HashedName)) + testTypeProblem(true, TestNSSet.Metadata, fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName)) + testTypeProblem(true, TestKeyNSList.Metadata, fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestKeyNSList.HashedName)) + testTypeProblem(false, 
TestKeyNSList.Metadata, fmt.Sprintf("create %s list:set size 8", TestKeyNSList.HashedName)) + testTypeProblem(true, TestNSSet.Metadata, fmt.Sprintf("create %s list:set size 8", TestNSSet.HashedName)) + testTypeProblem(true, TestNamedportSet.Metadata, fmt.Sprintf("create %s list:set size 8", TestNamedportSet.HashedName)) +} + +func TestUpdateWithBadSaveFile(t *testing.T) { + calls := []testutils.TestCmd{fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + // will have every set in save file in the dirty cache, all with no members + cidrSet2 := CreateTestSet("test2", CIDRBlocks) + nsSet2 := CreateTestSet("test2", Namespace) + saveFileLines := []string{ + fmt.Sprintf("add %s 1.1.1.1", TestCIDRSet.HashedName), // file should start with a create. jump to the first create (will NO-OP [no delete]) + fmt.Sprintf("add %s 2.2.2.2", TestCIDRSet.HashedName), // will have a no-op for same reason as above + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), // include + fmt.Sprintf("add %s 3.3.3.3", TestNSSet.HashedName), // include this add (will DELETE this member) + "create test-set1 hash:net family inet hashsize 1024 maxelem 65536", // ignore this set since it isn't part of NPM + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyPodSet.HashedName), // include + fmt.Sprintf("add %s 4.4.4.4", TestKeyPodSet.HashedName), // include this add (will DELETE this member) + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyPodSet.HashedName), // ignore this create and ensuing adds since we already included this set + fmt.Sprintf("add %s 5.5.5.5", TestKeyPodSet.HashedName), // ignore this add (will NO-OP [no delete]) + "create test-set1 hash:net family inet hashsize 1024 maxelem 65536", // ignore this set since it isn't part of NPM + fmt.Sprintf("create %s hash:ip,port family 
inet hashsize 1024 maxelem 65536", TestKVPodSet.HashedName), // ignore since wrong type + fmt.Sprintf("add %s 1.2.3.4,tcp", TestKVPodSet.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s hash:ip,port family inet hashsize 1024 maxelem 65536", TestKeyNSList.HashedName), // ignore since wrong type (will NO-OP [no delete]) + fmt.Sprintf("add %s 2.3.4.5,tcp", TestKVPodSet.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNamedportSet.HashedName), // ignore since wrong type (will NO-OP [no delete]) + fmt.Sprintf("add %s 1.2.2.1", TestNamedportSet.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyNSList.HashedName), // ignore since wrong type (will NO-OP [no delete]) + fmt.Sprintf("add %s 1.3.3.1", TestKeyNSList.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s list:set size 8", TestNamedportSet.HashedName), // ignore since wrong type (will NO-OP [no delete]) + fmt.Sprintf("add %s %s", TestNamedportSet.HashedName, TestNSSet.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s list:set size 8", TestKVPodSet.HashedName), // ignore since wrong type (will NO-OP [no delete]) + fmt.Sprintf("add %s %s", TestKVPodSet.HashedName, TestKeyPodSet.HashedName), // ignore this add (will NO-OP [no delete]) + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", cidrSet2.HashedName), // include this and adds up to unexpected add + fmt.Sprintf("add %s 7.7.7.7", cidrSet2.HashedName), // include this add (will DELETE this member) + fmt.Sprintf("add %s 8.8.8.8", nsSet2.HashedName), // ignore this and jump to next create since it's an unexpected set (will NO-OP [no delete]) + fmt.Sprintf("add %s 9.9.9.9", cidrSet2.HashedName), // ignore add because of error above (will NO-OP [no delete]) + } + saveFileString 
:= strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + iMgr.CreateIPSets([]*IPSetMetadata{ + TestCIDRSet.Metadata, TestNSSet.Metadata, TestKeyPodSet.Metadata, TestKVPodSet.Metadata, + TestKeyNSList.Metadata, TestNamedportSet.Metadata, cidrSet2.Metadata, nsSet2.Metadata, + }) + + creator := iMgr.fileCreator(len(calls), saveFileBytes) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + + expectedLines := []string{ + fmt.Sprintf("-N %s --exist nethash", TestNSSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", nsSet2.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKeyPodSet.HashedName), + fmt.Sprintf("-N %s --exist nethash", TestKVPodSet.HashedName), + fmt.Sprintf("-N %s --exist hash:ip,port", TestNamedportSet.HashedName), + fmt.Sprintf("-N %s --exist nethash maxelem 4294967295", TestCIDRSet.HashedName), + fmt.Sprintf("-N %s --exist nethash maxelem 4294967295", cidrSet2.HashedName), + fmt.Sprintf("-N %s --exist setlist", TestKeyNSList.HashedName), + fmt.Sprintf("-D %s 3.3.3.3", TestNSSet.HashedName), + fmt.Sprintf("-D %s 4.4.4.4", TestKeyPodSet.HashedName), + fmt.Sprintf("-D %s 7.7.7.7", cidrSet2.HashedName), + "", + } + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) + wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") + require.NoError(t, err, "ipset restore should be successful") + require.False(t, wasFileAltered, "file should not be altered") +} + +func TestFailureOnCreateForNewSet(t *testing.T) { + // with respect to the error line, be wary that sets in the save file are processed first and in order, and other sets are processed in random order + // test logic: + // - delete a set + // - create three sets, each with two members. 
the second set to appear will fail to be created + errorLineNum := 4 + setToCreateAlreadyExistsCommand := testutils.TestCmd{ + Cmd: ipsetRestoreStringSlice, + Stdout: fmt.Sprintf("Error in line %d: Set cannot be created: set with the same name already exists", errorLineNum), + ExitCode: 1, + } + calls := []testutils.TestCmd{setToCreateAlreadyExistsCommand, fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + // add all of these members to the kernel + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKVPodSet.Metadata}, "1.2.3.4", "a")) // create and add member + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKVPodSet.Metadata}, "1.2.3.5", "b")) // add member + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestCIDRSet.Metadata}, "1.2.3.4", "a")) // create and add member + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestCIDRSet.Metadata}, "1.2.3.5", "b")) // add member + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNamedportSet.Metadata}, "1.2.3.4,tcp:567", "a")) // create and add member + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNamedportSet.Metadata}, "1.2.3.5,tcp:567", "b")) // add member + iMgr.CreateIPSets([]*IPSetMetadata{TestKeyNSList.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestKeyNSList.PrefixName) + + // get original creator and run it the first time + creator := iMgr.fileCreator(len(calls), nil) + originalLines := strings.Split(creator.ToString(), "\n") + wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") + + // rerun the creator after removing previously run lines, and aborting the create, adds, and deletes for the second set to be updated + removedSetName := hashedNameOfSetImpacted(t, "-N", originalLines, errorLineNum) + requireStringInSlice(t, 
removedSetName, []string{TestNSSet.HashedName, TestKVPodSet.HashedName, TestCIDRSet.HashedName, TestNamedportSet.HashedName}) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + originalLength := len(expectedLines) + expectedLines = removeOperationsForSet(expectedLines, removedSetName, "-A") + require.Equal(t, originalLength-2, len(expectedLines), "expected to remove two add lines") + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") require.NoError(t, err) - require.False(t, wasFileAltered) + require.False(t, wasFileAltered, "file should not be altered") } -// TODO test that a reconcile list is updated -func TestFailureOnAddToList(t *testing.T) { - // This exact scenario wouldn't occur. This error happens when the cache is out of date with the kernel. +func TestFailureOnCreateForSetInKernel(t *testing.T) { + // with respect to the error line, be wary that sets in the save file are processed first and in order, and other sets are processed in random order + // test logic: + // - delete a set + // - update three sets already in the kernel, each with a delete and add line. 
the second set to appear will fail to be created + errorLineNum := 4 + setToCreateAlreadyExistsCommand := testutils.TestCmd{ + Cmd: ipsetRestoreStringSlice, + Stdout: fmt.Sprintf("Error in line %d: Set cannot be created: set with the same name already exists", errorLineNum), + ExitCode: 1, + } + calls := []testutils.TestCmd{setToCreateAlreadyExistsCommand, fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), // delete + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKeyPodSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestKeyPodSet.HashedName), // delete + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestKVPodSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestKVPodSet.HashedName), // delete + } + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + // add all of these members to the kernel + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "6.7.8.9", "a")) // add member to kernel + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "6.7.8.9", "a")) // add member to kernel + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKVPodSet.Metadata}, "6.7.8.9", "a")) // add member to kernel + iMgr.CreateIPSets([]*IPSetMetadata{TestKeyNSList.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestKeyNSList.PrefixName) + + // get original creator and run it the first time + creator := iMgr.fileCreator(len(calls), saveFileBytes) + originalLines := strings.Split(creator.ToString(), "\n") + wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") + require.Error(t, err, "ipset restore should fail") + 
require.True(t, wasFileAltered, "file should be altered") + + // rerun the creator after removing previously run lines, and aborting the create, adds, and deletes for the second set to be updated + removedSetName := hashedNameOfSetImpacted(t, "-N", originalLines, errorLineNum) + requireStringInSlice(t, removedSetName, []string{TestNSSet.HashedName, TestKeyPodSet.HashedName, TestKVPodSet.HashedName}) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + originalLength := len(expectedLines) + expectedLines = removeOperationsForSet(expectedLines, removedSetName, "-D") + require.Equal(t, originalLength-1, len(expectedLines), "expected to remove a delete line") + expectedLines = removeOperationsForSet(expectedLines, removedSetName, "-A") + require.Equal(t, originalLength-2, len(expectedLines), "expected to remove an add line") + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) + wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") + require.NoError(t, err) + require.False(t, wasFileAltered, "file should not be altered") +} + +func TestFailureOnAddToListInKernel(t *testing.T) { + // with respect to the error line, be wary that sets in the save file are processed first and in order, and other sets are processed in random order + // test logic: + // - delete a set + // - update three lists already in the set, each with a delete and add line. 
the second list to appear will have the failed add + // - create a set and add a member to it + errorLineNum := 10 setAlreadyExistsCommand := testutils.TestCmd{ Cmd: ipsetRestoreStringSlice, - Stdout: "Error in line 12: Set to be added/deleted/tested as element does not exist", + Stdout: fmt.Sprintf("Error in line %d: Set to be added/deleted/tested as element does not exist", errorLineNum), // this error might happen if the cache is out of date with the kernel ExitCode: 1, } calls := []testutils.TestCmd{setAlreadyExistsCommand, fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s list:set size 8", TestKeyNSList.HashedName), + fmt.Sprintf("add %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName), // delete this member + fmt.Sprintf("create %s list:set size 8", TestKVNSList.HashedName), + fmt.Sprintf("add %s %s", TestKVNSList.HashedName, TestNSSet.HashedName), // delete this member + fmt.Sprintf("create %s list:set size 8", TestNestedLabelList.HashedName), + fmt.Sprintf("add %s %s", TestNestedLabelList.HashedName, TestNSSet.HashedName), // delete this member - iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) - require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyPodSet.Metadata}) - iMgr.CreateIPSets([]*IPSetMetadata{TestKeyNSList.Metadata}) - require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata, TestKeyPodSet.Metadata})) - iMgr.CreateIPSets([]*IPSetMetadata{TestKVNSList.Metadata}) - require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKVNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata})) - iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) + } + saveFileString := 
strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "10.0.0.0", "a")) // create and add member + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, []*IPSetMetadata{TestKeyPodSet.Metadata})) // add member to kernel + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKVNSList.Metadata}, []*IPSetMetadata{TestKeyPodSet.Metadata})) // add member to kernel + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestNestedLabelList.Metadata}, []*IPSetMetadata{TestKeyPodSet.Metadata})) // add member to kernel + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestCIDRSet.PrefixName) - toAddOrUpdateSetNames := []string{ - TestNSSet.PrefixName, - TestKeyPodSet.PrefixName, - TestKeyNSList.PrefixName, - TestKVNSList.PrefixName, - } - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - toDeleteSetNames := []string{TestCIDRSet.PrefixName} - assertEqualContentsTestHelper(t, toDeleteSetNames, iMgr.toDeleteCache) - creator := iMgr.getFileCreator(2, toDeleteSetNames, toAddOrUpdateSetNames) - originalFileString := creator.ToString() + creator := iMgr.fileCreator(len(calls), saveFileBytes) + originalLines := strings.Split(creator.ToString(), "\n") wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.Error(t, err) - require.True(t, wasFileAltered) - - lines := []string{ - fmt.Sprintf("-F %s", TestCIDRSet.HashedName), - fmt.Sprintf("-X %s", TestCIDRSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestNSSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestKeyPodSet.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestKeyNSList.HashedName), - fmt.Sprintf("-N %s -exist setlist", TestKVNSList.HashedName), - } - lines = append(lines, getSortedLines(TestNSSet, "10.0.0.0")...) - lines = append(lines, getSortedLines(TestKeyPodSet)...) 
// line 9 - lines = append(lines, getSortedLines(TestKeyNSList, TestNSSet.HashedName, TestKeyPodSet.HashedName)...) // lines 10, 11, 12 - lines = append(lines, getSortedLines(TestKVNSList, TestNSSet.HashedName)...) - expectedFileString := strings.Join(lines, "\n") + "\n" - - // need this because adds are nondeterminstic - badLine := strings.Split(originalFileString, "\n")[12-1] - if badLine != fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestNSSet.HashedName) && badLine != fmt.Sprintf("-A %s %s", TestKeyNSList.HashedName, TestKeyPodSet.HashedName) { - require.FailNow(t, "incorrect failed line") - } - expectedFileString = strings.ReplaceAll(expectedFileString, badLine+"\n", "") - - actualFileString := getSortedFileString(creator) - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") + + // rerun the creator after removing previously run lines, and aborting the member-add line that failed + removedSetName := hashedNameOfSetImpacted(t, "-A", originalLines, errorLineNum) + requireStringInSlice(t, removedSetName, []string{TestKeyNSList.HashedName, TestKVNSList.HashedName, TestNestedLabelList.HashedName}) + removedMember := memberNameOfSetImpacted(t, originalLines, errorLineNum) + require.Equal(t, TestKeyPodSet.HashedName, removedMember) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") require.NoError(t, err) - require.False(t, wasFileAltered) + require.False(t, wasFileAltered, "file should not be altered") } -// TODO test that a reconcile list is updated -func TestFailureOnFlush(t *testing.T) { - 
// This exact scenario wouldn't occur. This error happens when the cache is out of date with the kernel. +func TestFailureOnAddToNewList(t *testing.T) { + // with respect to the error line, be weary that sets in the save file are processed first and in order, and other sets are processed in random order + // test logic: + // - delete a set + // - update a set already in the kernel with a delete and add line + // - create three lists in the set, each with an add line. the second list to appear will have the failed add + errorLineNum := 10 setAlreadyExistsCommand := testutils.TestCmd{ Cmd: ipsetRestoreStringSlice, - Stdout: "Error in line 1: The set with the given name does not exist", + Stdout: fmt.Sprintf("Error in line %d: Set to be added/deleted/tested as element does not exist", errorLineNum), // this error might happen if the cache is out of date with the kernel ExitCode: 1, } calls := []testutils.TestCmd{setAlreadyExistsCommand, fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) - iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) - require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) - iMgr.CreateIPSets([]*IPSetMetadata{TestKVPodSet.Metadata}) - iMgr.DeleteIPSet(TestKVPodSet.PrefixName) - iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), // delete this member + } + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.1", "a")) // create and add member + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKeyNSList.Metadata}, 
[]*IPSetMetadata{TestNSSet.Metadata})) // add member to kernel + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestKVNSList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata})) // add member to kernel + require.NoError(t, iMgr.AddToLists([]*IPSetMetadata{TestNestedLabelList.Metadata}, []*IPSetMetadata{TestNSSet.Metadata})) // add member to kernel + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestCIDRSet.PrefixName) - toAddOrUpdateSetNames := []string{TestNSSet.PrefixName} - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - toDeleteSetNames := []string{TestKVPodSet.PrefixName, TestCIDRSet.PrefixName} - assertEqualContentsTestHelper(t, toDeleteSetNames, iMgr.toDeleteCache) - creator := iMgr.getFileCreator(2, toDeleteSetNames, toAddOrUpdateSetNames) + creator := iMgr.fileCreator(len(calls), saveFileBytes) + originalLines := strings.Split(creator.ToString(), "\n") wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.Error(t, err) - require.True(t, wasFileAltered) + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") + + // rerun the creator after removing previously run lines, and aborting the member-add line that failed + removedSetName := hashedNameOfSetImpacted(t, "-A", originalLines, errorLineNum) + requireStringInSlice(t, removedSetName, []string{TestKeyNSList.HashedName, TestKVNSList.HashedName, TestNestedLabelList.HashedName}) + removedMember := memberNameOfSetImpacted(t, originalLines, errorLineNum) + require.Equal(t, TestNSSet.HashedName, removedMember) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) + wasFileAltered, err 
= creator.RunCommandOnceWithFile("ipset", "restore") + require.NoError(t, err) + require.False(t, wasFileAltered, "file should not be altered") +} - lines := []string{ - fmt.Sprintf("-F %s", TestCIDRSet.HashedName), - fmt.Sprintf("-X %s", TestCIDRSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestNSSet.HashedName), +func TestFailureOnDelete(t *testing.T) { + // TODO +} + +func TestFailureOnFlush(t *testing.T) { + // test logic: + // - delete two sets. the first to appear will fail to flush + // - update a set by deleting a member + // - create a set with a member + errorLineNum := 1 + setAlreadyExistsCommand := testutils.TestCmd{ + Cmd: ipsetRestoreStringSlice, + Stdout: fmt.Sprintf("Error in line %d: The set with the given name does not exist", errorLineNum), // this error might happen if the cache is out of date with the kernel + ExitCode: 1, + } + calls := []testutils.TestCmd{setAlreadyExistsCommand, fakeRestoreSuccessCommand} + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), // keep this member + fmt.Sprintf("add %s 10.0.0.1", TestNSSet.HashedName), // delete this member } - lines = append(lines, getSortedLines(TestNSSet, "10.0.0.0")...) 
- expectedFileString := strings.Join(lines, "\n") + "\n" + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) + + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) // in kernel already + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "10.0.0.0", "a")) // not in kernel yet + iMgr.CreateIPSets([]*IPSetMetadata{TestKVPodSet.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestKVPodSet.PrefixName) + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestCIDRSet.PrefixName) - actualFileString := getSortedFileString(creator) - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + creator := iMgr.fileCreator(len(calls), saveFileBytes) + originalLines := strings.Split(creator.ToString(), "\n") + wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") + + // rerun the creator after aborting the flush and delete for the set that failed to flush + removedSetName := hashedNameOfSetImpacted(t, "-F", originalLines, errorLineNum) + requireStringInSlice(t, removedSetName, []string{TestKVPodSet.HashedName, TestCIDRSet.HashedName}) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + originalLength := len(expectedLines) + expectedLines = removeOperationsForSet(expectedLines, removedSetName, "-X") + require.Equal(t, originalLength-1, len(expectedLines), "expected to remove one destroy line") + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) + + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") require.NoError(t, err) - 
require.False(t, wasFileAltered) + require.False(t, wasFileAltered, "file should not be altered") } -// TODO test that a reconcile list is updated -func TestFailureOnDeletion(t *testing.T) { +func TestFailureOnDestroy(t *testing.T) { + // test logic: + // - delete two sets. the first to appear will fail to delete + // - update a set by deleting a member + // - create a set with a member + errorLineNum := 3 setAlreadyExistsCommand := testutils.TestCmd{ Cmd: ipsetRestoreStringSlice, - Stdout: "Error in line 3: Set cannot be destroyed: it is in use by a kernel component", + Stdout: fmt.Sprintf("Error in line %d: Set cannot be destroyed: it is in use by a kernel component", errorLineNum), ExitCode: 1, } calls := []testutils.TestCmd{setAlreadyExistsCommand, fakeRestoreSuccessCommand} - iMgr := NewIPSetManager(iMgrApplyAllCfg, common.NewMockIOShim(calls)) + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + saveFileLines := []string{ + fmt.Sprintf("create %s hash:net family inet hashsize 1024 maxelem 65536", TestNSSet.HashedName), + fmt.Sprintf("add %s 10.0.0.0", TestNSSet.HashedName), // keep this member + fmt.Sprintf("add %s 10.0.0.1", TestNSSet.HashedName), // delete this member + } + saveFileString := strings.Join(saveFileLines, "\n") + saveFileBytes := []byte(saveFileString) - iMgr.CreateIPSets([]*IPSetMetadata{TestNSSet.Metadata}) - require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) - iMgr.CreateIPSets([]*IPSetMetadata{TestKVPodSet.Metadata}) + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestNSSet.Metadata}, "10.0.0.0", "a")) // in kernel already + require.NoError(t, iMgr.AddToSets([]*IPSetMetadata{TestKeyPodSet.Metadata}, "10.0.0.0", "a")) // not in kernel yet + iMgr.CreateIPSets([]*IPSetMetadata{TestKVPodSet.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestKVPodSet.PrefixName) - 
iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete iMgr.DeleteIPSet(TestCIDRSet.PrefixName) - toAddOrUpdateSetNames := []string{TestNSSet.PrefixName} - assertEqualContentsTestHelper(t, toAddOrUpdateSetNames, iMgr.toAddOrUpdateCache) - toDeleteSetNames := []string{TestKVPodSet.PrefixName, TestCIDRSet.PrefixName} - assertEqualContentsTestHelper(t, toDeleteSetNames, iMgr.toDeleteCache) - creator := iMgr.getFileCreator(2, toDeleteSetNames, toAddOrUpdateSetNames) + creator := iMgr.fileCreator(len(calls), saveFileBytes) + originalLines := strings.Split(creator.ToString(), "\n") wasFileAltered, err := creator.RunCommandOnceWithFile("ipset", "restore") - require.Error(t, err) - require.True(t, wasFileAltered) + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") - lines := []string{ - fmt.Sprintf("-F %s", TestKVPodSet.HashedName), - fmt.Sprintf("-F %s", TestCIDRSet.HashedName), - fmt.Sprintf("-X %s", TestCIDRSet.HashedName), - fmt.Sprintf("-N %s -exist nethash", TestNSSet.HashedName), - } - lines = append(lines, getSortedLines(TestNSSet, "10.0.0.0")...) 
- expectedFileString := strings.Join(lines, "\n") + "\n" + removedSetName := hashedNameOfSetImpacted(t, "-X", originalLines, errorLineNum) + requireStringInSlice(t, removedSetName, []string{TestKVPodSet.HashedName, TestCIDRSet.HashedName}) + expectedLines := originalLines[errorLineNum:] // skip the error line and the lines previously run + sortedExpectedLines := testAndSortRestoreFileLines(t, expectedLines) - actualFileString := getSortedFileString(creator) - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, sortedExpectedLines, actualLines) wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") require.NoError(t, err) - require.False(t, wasFileAltered) + require.False(t, wasFileAltered, "file should not be altered") } -// TODO if we add file-level error handlers, add tests for them - -func assertEqualContentsTestHelper(t *testing.T, setNames []string, cache map[string]struct{}) { - require.Equal(t, len(setNames), len(cache), "cache is different than list of set names") - for _, setName := range setNames { - _, exists := cache[setName] - require.True(t, exists, "cache is different than list of set names") +func TestFailureOnLastLine(t *testing.T) { + // make sure that the file recovers and returns no error when there are no more lines on the second run + // test logic: + // - delete a set + errorLineNum := 2 + calls := []testutils.TestCmd{ + { + Cmd: ipsetRestoreStringSlice, + Stdout: fmt.Sprintf("Error in line %d: some destroy error", errorLineNum), + ExitCode: 1, + }, } + ioshim := common.NewMockIOShim(calls) + defer ioshim.VerifyCalls(t, calls) + iMgr := NewIPSetManager(iMgrApplyAllCfg, ioshim) + + iMgr.CreateIPSets([]*IPSetMetadata{TestCIDRSet.Metadata}) // create so we can delete + iMgr.DeleteIPSet(TestCIDRSet.PrefixName) + + creator := iMgr.fileCreator(2, nil) + wasFileAltered, err := 
creator.RunCommandOnceWithFile("ipset", "restore") + require.Error(t, err, "ipset restore should fail") + require.True(t, wasFileAltered, "file should be altered") + + expectedLines := []string{""} // skip the error line and the lines previously run + actualLines := testAndSortRestoreFileString(t, creator.ToString()) + dptestutils.AssertEqualLines(t, expectedLines, actualLines) + wasFileAltered, err = creator.RunCommandOnceWithFile("ipset", "restore") + require.NoError(t, err) + require.False(t, wasFileAltered, "file should not be altered") } -// the order of adds is nondeterministic, so we're sorting them -func getSortedLines(set *TestSet, members ...string) []string { - result := []string{fmt.Sprintf("-F %s", set.HashedName)} - adds := make([]string, len(members)) - for k, member := range members { - adds[k] = fmt.Sprintf("-A %s %s", set.HashedName, member) - } - sort.Strings(adds) - return append(result, adds...) +// make sure file goes in order of flushes, destroys, creates, then adds/deletes, +// then sort those sections and return the lines in an array +func testAndSortRestoreFileString(t *testing.T, multilineString string) []string { + return testAndSortRestoreFileLines(t, strings.Split(multilineString, "\n")) } -// the order of adds is nondeterministic, so we're sorting all neighboring adds -func getSortedFileString(creator *ioutil.FileCreator) string { - lines := strings.Split(creator.ToString(), "\n") +func testAndSortRestoreFileLines(t *testing.T, lines []string) []string { + require.True(t, lines[len(lines)-1] == "", "restore file must end with blank line") + lines = lines[:len(lines)-1] // remove the blank line - sortedLines := make([]string, 0) + flushIndices := [2]int{0, len(lines)} + destroyIndices := flushIndices + createIndices := flushIndices + addDeleteIndices := flushIndices k := 0 for k < len(lines) { - line := lines[k] - if !isAddLine(line) { - sortedLines = append(sortedLines, line) - k++ - continue + operation := lines[k][0:2] + if 
operation != "-F" { + flushIndices[1] = k + destroyIndices[0] = k + break } - addLines := make([]string, 0) - for k < len(lines) { - line := lines[k] - if !isAddLine(line) { - break - } - addLines = append(addLines, line) - k++ + k++ + } + for k < len(lines) { + operation := lines[k][0:2] + require.False(t, operation == "-F", "should not get -F operation in the restore file after flush section") + if operation != "-X" { + destroyIndices[1] = k + createIndices[0] = k + break + } + k++ + } + for k < len(lines) { + operation := lines[k][0:2] + require.False(t, operation == "-F" || operation == "-X", "should not get %s operation in the restore file after destroy section") + if operation != "-N" { + createIndices[1] = k + addDeleteIndices[0] = k + break } - sort.Strings(addLines) - sortedLines = append(sortedLines, addLines...) + k++ } - return strings.Join(sortedLines, "\n") + for k < len(lines) { + operation := lines[k][0:2] + require.True(t, operation == "-D" || operation == "-A", "should not get %s operation in the restore file after create section", operation) + k++ + } + flushLines := lines[flushIndices[0]:flushIndices[1]] + destroyLines := lines[destroyIndices[0]:destroyIndices[1]] + createLines := lines[createIndices[0]:createIndices[1]] + addDeleteLines := lines[addDeleteIndices[0]:addDeleteIndices[1]] + sort.Strings(flushLines) + sort.Strings(destroyLines) + sort.Strings(createLines) + sort.Strings(addDeleteLines) + result := flushLines + result = append(result, destroyLines...) + result = append(result, createLines...) + result = append(result, addDeleteLines...) 
+ return result +} + +func hashedNameOfSetImpacted(t *testing.T, operation string, lines []string, lineNum int) string { + lineNumIndex := lineNum - 1 + line := lines[lineNumIndex] + pattern := fmt.Sprintf(`\%s (azure-npm-\d+)`, operation) + re := regexp.MustCompile(pattern) + results := re.FindStringSubmatch(line) + require.Equal(t, 2, len(results), "expected to find a match with regex pattern %s for line: %s", pattern, line) + return results[1] // second item in slice is the group surrounded by () } -func isAddLine(line string) bool { - return len(line) >= 2 && line[:2] == "-A" +func memberNameOfSetImpacted(t *testing.T, lines []string, lineNum int) string { + lineNumIndex := lineNum - 1 + line := lines[lineNumIndex] + pattern := `\-[AD] azure-npm-\d+ (.*)` + re := regexp.MustCompile(pattern) + member := re.FindStringSubmatch(line)[1] + results := re.FindStringSubmatch(line) + require.Equal(t, 2, len(results), "expected to find a match with regex pattern %s for line: %s", pattern, line) + return member +} + +func requireStringInSlice(t *testing.T, item string, possibleValues []string) { + success := false + for _, value := range possibleValues { + if item == value { + success = true + break + } + } + require.Truef(t, success, "item %s was not one of the possible values", item) +} + +// remove lines that start with the operation (include the dash in the operations) e.g. 
+// -A 1.2.3.4 +// -D 1.2.3.4 +// -X +func removeOperationsForSet(lines []string, hashedSetName, operation string) []string { + operationRegex := regexp.MustCompile(fmt.Sprintf(`\%s %s`, operation, hashedSetName)) + goodLines := []string{} + for _, line := range lines { + if !operationRegex.MatchString(line) { + goodLines = append(goodLines, line) + } + } + return goodLines } diff --git a/npm/pkg/dataplane/ipsets/testutils_linux.go b/npm/pkg/dataplane/ipsets/testutils_linux.go index f1bb187c33..4181f75a17 100644 --- a/npm/pkg/dataplane/ipsets/testutils_linux.go +++ b/npm/pkg/dataplane/ipsets/testutils_linux.go @@ -2,14 +2,25 @@ package ipsets import testutils "github.com/Azure/azure-container-networking/test/utils" -var fakeRestoreSuccessCommand = testutils.TestCmd{ - Cmd: []string{"ipset", "restore"}, - Stdout: "success", - ExitCode: 0, -} +var ( + ipsetRestoreStringSlice = []string{"ipset", "restore"} + ipsetSaveStringSlice = []string{"ipset", "save"} + + fakeRestoreSuccessCommand = testutils.TestCmd{ + Cmd: ipsetRestoreStringSlice, + Stdout: "success", + ExitCode: 0, + } +) func GetApplyIPSetsTestCalls(toAddOrUpdateIPSets, toDeleteIPSets []*IPSetMetadata) []testutils.TestCmd { - // TODO eventually call ipset save if there are toAddOrUpdateIPSets + if len(toAddOrUpdateIPSets) > 0 { + return []testutils.TestCmd{ + {Cmd: ipsetSaveStringSlice, PipedToCommand: true}, + {Cmd: []string{"grep", "azure-npm-"}, ExitCode: 1}, // grep didn't find anything + {Cmd: ipsetRestoreStringSlice}, + } + } return []testutils.TestCmd{fakeRestoreSuccessCommand} } diff --git a/npm/pkg/dataplane/policies/chain-management_linux.go b/npm/pkg/dataplane/policies/chain-management_linux.go index dafc828c6e..2168706584 100644 --- a/npm/pkg/dataplane/policies/chain-management_linux.go +++ b/npm/pkg/dataplane/policies/chain-management_linux.go @@ -92,6 +92,7 @@ func (pMgr *PolicyManager) initializeNPMChains() error { func (pMgr *PolicyManager) removeNPMChains() error { deleteErrCode, deleteErr := 
pMgr.runIPTablesCommand(util.IptablesDeletionFlag, jumpFromForwardToAzureChainArgs...) hadDeleteError := deleteErr != nil && deleteErrCode != couldntLoadTargetErrorCode + // TODO check rule doesn't exist error code instead. The first call of dp.Reset() we will have exit code 2 (couldn't load target) since AZURE-NPM won't exist if hadDeleteError { baseErrString := "failed to delete jump from FORWARD chain to AZURE-NPM chain" metrics.SendErrorLogAndMetric(util.IptmID, "Error: %s with exit code %d and error: %s", baseErrString, deleteErrCode, deleteErr.Error()) @@ -216,7 +217,7 @@ func (pMgr *PolicyManager) getCreatorForInitChains() *ioutil.FileCreator { func (pMgr *PolicyManager) positionAzureChainJumpRule() error { kubeServicesLine, kubeServicesLineNumErr := pMgr.getChainLineNumber(util.IptablesKubeServicesChain) if kubeServicesLineNumErr != nil { - // not possible to cover this branch currently because of testing limitations for pipeCommandToGrep() + // not possible to cover this branch currently because of testing limitations for PipeCommandToGrep() baseErrString := "failed to get index of jump from KUBE-SERVICES chain to FORWARD chain with error" metrics.SendErrorLogAndMetric(util.IptmID, "Error: %s: %s", baseErrString, kubeServicesLineNumErr.Error()) return npmerrors.SimpleErrorWrapper(baseErrString, kubeServicesLineNumErr) @@ -253,7 +254,7 @@ func (pMgr *PolicyManager) positionAzureChainJumpRule() error { npmChainLine, npmLineNumErr := pMgr.getChainLineNumber(util.IptablesAzureChain) if npmLineNumErr != nil { - // not possible to cover this branch currently because of testing limitations for pipeCommandToGrep() + // not possible to cover this branch currently because of testing limitations for PipeCommandToGrep() baseErrString := "failed to get index of jump from FORWARD chain to AZURE-NPM chain" metrics.SendErrorLogAndMetric(util.IptmID, "Error: %s: %s", baseErrString, npmLineNumErr.Error()) // FIXME update ID @@ -298,10 +299,10 @@ func (pMgr *PolicyManager) 
getChainLineNumber(chain string) (int, error) { util.IptablesWaitFlag, defaultlockWaitTimeInSeconds, util.IptablesTableFlag, util.IptablesFilterTable, util.IptablesNumericFlag, util.IptablesListFlag, util.IptablesForwardChain, util.IptablesLineNumbersFlag, ) - grepCommand := pMgr.ioShim.Exec.Command("grep", chain) - searchResults, gotMatches, err := pipeCommandToGrep(listForwardEntriesCommand, grepCommand) + grepCommand := pMgr.ioShim.Exec.Command(ioutil.Grep, chain) + searchResults, gotMatches, err := ioutil.PipeCommandToGrep(listForwardEntriesCommand, grepCommand) if err != nil { - // not possible to cover this branch currently because of testing limitations for pipeCommandToGrep() + // not possible to cover this branch currently because of testing limitations for PipeCommandToGrep() return 0, npmerrors.SimpleErrorWrapper(fmt.Sprintf("failed to determine line number for jump from FORWARD chain to %s chain", chain), err) } if !gotMatches { @@ -314,39 +315,11 @@ func (pMgr *PolicyManager) getChainLineNumber(chain string) (int, error) { return 0, nil } -func pipeCommandToGrep(command, grepCommand utilexec.Cmd) (searchResults []byte, gotMatches bool, commandError error) { - pipe, commandError := command.StdoutPipe() - if commandError != nil { - return - } - - closePipe := func() { _ = pipe.Close() } // appease go lint - defer closePipe() - - commandError = command.Start() - if commandError != nil { - return - } - - // Without this wait, defunct iptable child process are created - wait := func() { _ = command.Wait() } // appease go lint - defer wait() - - output, err := grepCommand.CombinedOutput() - if err != nil { - // grep returns err status 1 if nothing is found - return - } - searchResults = output - gotMatches = true - return -} - // make this a function for easier testing func (pMgr *PolicyManager) getCreatorAndChainsForReset() (creator *ioutil.FileCreator, chainsToFlush []string) { oldPolicyChains, err := pMgr.getPolicyChainNames() if err != nil { - // not 
possible to cover this branch currently because of testing limitations for pipeCommandToGrep() + // not possible to cover this branch currently because of testing limitations for PipeCommandToGrep() metrics.SendErrorLogAndMetric(util.IptmID, "Error: failed to determine NPM ingress/egress policy chains to delete") } chainsToFlush = iptablesOldAndNewChains @@ -361,10 +334,10 @@ func (pMgr *PolicyManager) getPolicyChainNames() ([]string, error) { util.IptablesWaitFlag, defaultlockWaitTimeInSeconds, util.IptablesTableFlag, util.IptablesFilterTable, util.IptablesNumericFlag, util.IptablesListFlag, ) - grepCommand := pMgr.ioShim.Exec.Command("grep", ingressOrEgressPolicyChainPattern) - searchResults, gotMatches, err := pipeCommandToGrep(iptablesListCommand, grepCommand) + grepCommand := pMgr.ioShim.Exec.Command(ioutil.Grep, ingressOrEgressPolicyChainPattern) + searchResults, gotMatches, err := ioutil.PipeCommandToGrep(iptablesListCommand, grepCommand) if err != nil { - // not possible to cover this branch currently because of testing limitations for pipeCommandToGrep() + // not possible to cover this branch currently because of testing limitations for PipeCommandToGrep() return nil, npmerrors.SimpleErrorWrapper("failed to get policy chain names", err) } if !gotMatches { diff --git a/npm/pkg/dataplane/policies/chain-management_linux_test.go b/npm/pkg/dataplane/policies/chain-management_linux_test.go index e49147e741..73482e4abe 100644 --- a/npm/pkg/dataplane/policies/chain-management_linux_test.go +++ b/npm/pkg/dataplane/policies/chain-management_linux_test.go @@ -14,12 +14,11 @@ import ( func TestInitChainsCreator(t *testing.T) { pMgr := NewPolicyManager(common.NewMockIOShim(nil)) creator := pMgr.getCreatorForInitChains() // doesn't make any exec calls - actualFileString := creator.ToString() + actualLines := strings.Split(creator.ToString(), "\n") expectedLines := []string{"*filter"} for _, chain := range iptablesAzureChains { expectedLines = append(expectedLines, 
fmt.Sprintf(":%s - -", chain)) } - expectedLines = append(expectedLines, []string{ "-A AZURE-NPM -j AZURE-NPM-INGRESS", "-A AZURE-NPM -j AZURE-NPM-EGRESS", @@ -33,8 +32,7 @@ func TestInitChainsCreator(t *testing.T) { "-A AZURE-NPM-ACCEPT -j ACCEPT", "COMMIT\n", }...) - expectedFileString := strings.Join(expectedLines, "\n") - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + dptestutils.AssertEqualLines(t, expectedLines, actualLines) } func TestInitChainsSuccess(t *testing.T) { @@ -100,14 +98,13 @@ func TestRemoveChainsCreator(t *testing.T) { "AZURE-NPM-EGRESS-123456", } require.Equal(t, expectedChainsToFlush, chainsToFlush) - actualFileString := creator.ToString() + actualLines := strings.Split(creator.ToString(), "\n") expectedLines := []string{"*filter"} for _, chain := range expectedChainsToFlush { expectedLines = append(expectedLines, fmt.Sprintf(":%s - -", chain)) } expectedLines = append(expectedLines, "COMMIT\n") - expectedFileString := strings.Join(expectedLines, "\n") - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, actualFileString) + dptestutils.AssertEqualLines(t, expectedLines, actualLines) } func TestRemoveChainsSuccess(t *testing.T) { diff --git a/npm/pkg/dataplane/policies/policymanager_linux.go b/npm/pkg/dataplane/policies/policymanager_linux.go index 3e1fbd00f0..0313b4bac8 100644 --- a/npm/pkg/dataplane/policies/policymanager_linux.go +++ b/npm/pkg/dataplane/policies/policymanager_linux.go @@ -144,6 +144,7 @@ func (pMgr *PolicyManager) deleteJumpRule(policy *NPMNetworkPolicy, isIngress bo specs = append([]string{baseChainName}, specs...) errCode, err := pMgr.runIPTablesCommand(util.IptablesDeletionFlag, specs...) 
if err != nil && errCode != couldntLoadTargetErrorCode { + // TODO check rule doesn't exist error code instead because the chain should exist errorString := fmt.Sprintf("failed to delete jump from %s chain to %s chain for policy %s with exit code %d", baseChainName, chainName, policy.Name, errCode) log.Errorf(errorString+": %w", err) return npmerrors.SimpleErrorWrapper(errorString, err) diff --git a/npm/pkg/dataplane/policies/policymanager_linux_test.go b/npm/pkg/dataplane/policies/policymanager_linux_test.go index 0f6b73e2af..da5ed289ef 100644 --- a/npm/pkg/dataplane/policies/policymanager_linux_test.go +++ b/npm/pkg/dataplane/policies/policymanager_linux_test.go @@ -37,7 +37,7 @@ func TestAddPolicies(t *testing.T) { calls := []testutils.TestCmd{fakeIPTablesRestoreCommand} pMgr := NewPolicyManager(common.NewMockIOShim(calls)) creator := pMgr.getCreatorForNewNetworkPolicies(TestNetworkPolicies...) - fileString := creator.ToString() + actualLines := strings.Split(creator.ToString(), "\n") expectedLines := []string{ "*filter", // all chains @@ -60,8 +60,7 @@ func TestAddPolicies(t *testing.T) { fmt.Sprintf("-I AZURE-NPM-EGRESS 2 %s", testPolicy3EgressJump), "COMMIT\n", } - expectedFileString := strings.Join(expectedLines, "\n") - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, fileString) + dptestutils.AssertEqualLines(t, expectedLines, actualLines) err := pMgr.addPolicy(TestNetworkPolicies[0], nil) require.NoError(t, err) @@ -83,7 +82,7 @@ func TestRemovePolicies(t *testing.T) { } pMgr := NewPolicyManager(common.NewMockIOShim(calls)) creator := pMgr.getCreatorForRemovingPolicies(TestNetworkPolicies...) 
- fileString := creator.ToString() + actualLines := strings.Split(creator.ToString(), "\n") expectedLines := []string{ "*filter", fmt.Sprintf(":%s - -", testPolicy1IngressChain), @@ -92,8 +91,7 @@ func TestRemovePolicies(t *testing.T) { fmt.Sprintf(":%s - -", testPolicy3EgressChain), "COMMIT\n", } - expectedFileString := strings.Join(expectedLines, "\n") - dptestutils.AssertEqualMultilineStrings(t, expectedFileString, fileString) + dptestutils.AssertEqualLines(t, expectedLines, actualLines) err := pMgr.AddPolicy(TestNetworkPolicies[0], nil) // need the policy in the cache require.NoError(t, err) diff --git a/npm/pkg/dataplane/testutils/file-comparison.go b/npm/pkg/dataplane/testutils/file-comparison.go index bd7d1ca76a..76c9e813b5 100644 --- a/npm/pkg/dataplane/testutils/file-comparison.go +++ b/npm/pkg/dataplane/testutils/file-comparison.go @@ -8,17 +8,15 @@ import ( "github.com/stretchr/testify/require" ) -func AssertEqualMultilineStrings(t *testing.T, expectedMultilineString, actualMultilineString string) { - if expectedMultilineString == actualMultilineString { +func AssertEqualLines(t *testing.T, expectedLines, actualLines []string) { + if strings.Join(expectedLines, "\n") == strings.Join(actualLines, "\n") { return } fmt.Println("EXPECTED FILE STRING:") - expectedLines := strings.Split(expectedMultilineString, "\n") for _, line := range expectedLines { fmt.Println(line) } fmt.Println("ACTUAL FILE STRING") - actualLines := strings.Split(actualMultilineString, "\n") for _, line := range actualLines { fmt.Println(line) } diff --git a/test/integration/npm/main.go b/test/integration/npm/main.go index 0f0320ac97..0f07564a75 100644 --- a/test/integration/npm/main.go +++ b/test/integration/npm/main.go @@ -98,6 +98,12 @@ func main() { // remove members from some sets and delete some sets panicOnError(dp.RemoveFromSets([]*ipsets.IPSetMetadata{ipsets.TestNSSet.Metadata}, podMetadataB)) + podMetadataD := &dataplane.PodMetadata{ + PodKey: "d", + PodIP: "1.2.3.4", + 
NodeName: "", + } + panicOnError(dp.AddToSets([]*ipsets.IPSetMetadata{ipsets.TestKeyPodSet.Metadata, ipsets.TestNSSet.Metadata}, podMetadataD)) dp.DeleteIPSet(ipsets.TestKVPodSet.Metadata) panicOnError(dp.ApplyDataPlane()) @@ -119,6 +125,9 @@ func testPolicyManager() { panicOnError(pMgr.Reset()) printAndWait() + panicOnError(pMgr.Initialize()) + printAndWait() + panicOnError(pMgr.AddPolicy(policies.TestNetworkPolicies[0], nil)) printAndWait() diff --git a/test/utils/utils.go b/test/utils/utils.go index 8909abd8d7..276bb0afdf 100644 --- a/test/utils/utils.go +++ b/test/utils/utils.go @@ -18,9 +18,11 @@ import ( ) type TestCmd struct { - Cmd []string - Stdout string // fakexec doesn't leverage stderr in CombinedOutput, so use stdout for stderr too - ExitCode int + Cmd []string + Stdout string // fakexec doesn't leverage stderr in CombinedOutput, so use stdout for stderr too + ExitCode int + HasStartError bool + PipedToCommand bool } func GetFakeExecWithScripts(calls []TestCmd) *fakeexec.FakeExec { @@ -33,8 +35,12 @@ func GetFakeExecWithScripts(calls []TestCmd) *fakeexec.FakeExec { ccmd := call.Cmd if call.ExitCode != 0 { err := &fakeexec.FakeExitError{Status: call.ExitCode} - fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, func() ([]byte, []byte, error) { return []byte(stdout), nil, err }) - } else { + if call.HasStartError { + fcmd.StartResponse = err + } else if !call.PipedToCommand { + fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, func() ([]byte, []byte, error) { return []byte(stdout), nil, err }) + } + } else if !call.PipedToCommand { fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, func() ([]byte, []byte, error) { return []byte(stdout), nil, nil }) } @@ -51,7 +57,7 @@ func GetFakeExecWithScripts(calls []TestCmd) *fakeexec.FakeExec { func VerifyCalls(t *testing.T, fexec *fakeexec.FakeExec, calls []TestCmd) { err := recover() require.Nil(t, err) - require.Equalf(t, len(calls), fexec.CommandCalls, "Number of exec calls 
mismatched, expected [%d], actual [%d]", fexec.CommandCalls, len(calls)) + require.Equalf(t, len(calls), fexec.CommandCalls, "Number of exec calls mismatched, expected [%d], actual [%d]", len(calls), fexec.CommandCalls) } func isCurrentUserRoot() bool {