diff --git a/Makefile b/Makefile
index d0cf5f33ea..9e9f238ca0 100644
--- a/Makefile
+++ b/Makefile
@@ -724,7 +724,7 @@ test-integration: ## run all integration tests.
 	go test -mod=readonly -buildvcs=false -timeout 1h -coverpkg=./... -race -covermode atomic -coverprofile=coverage.out -tags=integration ./test/integration...
 
 test-validate-state:
-	cd test/integration/load && go test -count 1 -timeout 30m -tags load -run ^TestValidateState -tags=load -restart-case=$(RESTART_CASE) -os=$(OS)
+	cd test/integration/load && go test -mod=readonly -count=1 -timeout 30m -tags load -run ^TestValidateState -tags=load -restart-case=$(RESTART_CASE) -os=$(OS)
 	cd ../../..
 
 test-cyclonus: ## run the cyclonus test for npm.
diff --git a/test/integration/load/load_test.go b/test/integration/load/load_test.go
index e84c0bcc80..b409bed425 100644
--- a/test/integration/load/load_test.go
+++ b/test/integration/load/load_test.go
@@ -5,7 +5,6 @@ package load
 import (
 	"context"
 	"flag"
-	"fmt"
 	"testing"
 	"time"
 
@@ -36,13 +35,6 @@ var noopDeploymentMap = map[string]string{
 	"linux":   manifestDir + "/noop-deployment-linux.yaml",
 }
 
-// Todo: Add the validation for the data path function for the linux/windows client.
-type stateValidator interface {
-	ValidateStateFile(context.Context) error
-	ValidateRestartNetwork(context.Context) error
-	// ValidateDataPath() error
-}
-
 /*
 In order to run the scale tests, you need a k8s cluster and its kubeconfig.
 If no kubeconfig is passed, the test will attempt to find one in the default location for kubectl config.
@@ -60,10 +52,10 @@ todo: consider adding the following scenarios
 - [x] Test the CNS Local cache.
 - [x] Test the Cilium state file.
 - [x] Test the Node restart.
-- [ ] Test based on operating system.
-- [ ] Test the HNS state file.
-- [ ] Parameterize the os, cni and number of iterations.
-- [ ] Add deployment yaml for windows.
+- [x] Test based on operating system.
+- [x] Test the HNS state file.
+- [x] Parameterize the os, cni and number of iterations.
+- [x] Add deployment yaml for windows.
 */
 func TestLoad(t *testing.T) {
 	clientset, err := k8sutils.MustGetClientset()
@@ -139,39 +131,13 @@ func TestValidateState(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
 	defer cancel()
 
-	var validator stateValidator
-
-	t.Log("Validating the state file")
-	switch *osType {
-	case "linux":
-		validator, err = validate.CreateLinuxValidator(ctx, clientset, config, namespace, *cniType, *restartCase)
-		if err != nil {
-			t.Fatal(err)
-		}
-	case "windows":
-		validator, err = validate.CreateWindowsValidator(ctx, clientset, config, namespace, *cniType, *restartCase)
-		if err != nil {
-			t.Fatal(err)
-		}
-	default:
-		t.Fatalf("unknown os type %s", *osType)
-	}
-
-	err = validator.ValidateStateFile(ctx)
+	validator, err := validate.CreateValidator(ctx, clientset, config, namespace, *cniType, *restartCase, *osType)
 	if err != nil {
 		t.Fatal(err)
 	}
-
-	// We are restarting the systmemd network and checking that the connectivity works after the restart. For more details: https://github.com/cilium/cilium/issues/18706
-	t.Log("Validating the restart network scenario")
-	t.Run(fmt.Sprintf("validate network restart - %s", *osType), func(t *testing.T) {
-		if *osType == "windows" {
-			t.Skip("validate network restart not implemented on Windows")
-		}
-		if err := validator.ValidateRestartNetwork(ctx); err != nil {
-			t.Fatal(err)
-		}
-	})
+	if err := validator.Validate(ctx); err != nil {
+		t.Fatal(err)
+	}
 }
 
 // TestScaleDeployment scales the deployment up/down based on the replicas passed.
diff --git a/test/validate/linux_validate.go b/test/validate/linux_validate.go
index d97a8b3f5b..05048f628b 100644
--- a/test/validate/linux_validate.go
+++ b/test/validate/linux_validate.go
@@ -1,23 +1,14 @@
 package validate
 
 import (
-	"context"
 	"encoding/json"
-	"log"
 
 	"github.com/Azure/azure-container-networking/cns"
 	restserver "github.com/Azure/azure-container-networking/cns/restserver"
-	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
 	"github.com/pkg/errors"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
 )
 
 const (
-	privilegedDaemonSetPath = "../manifests/load/privileged-daemonset.yaml"
-	privilegedLabelSelector = "app=privileged-daemonset"
-	privilegedNamespace     = "kube-system"
-
 	cnsLabelSelector    = "k8s-app=azure-cns"
 	ciliumLabelSelector = "k8s-app=cilium"
 )
@@ -31,12 +22,15 @@ var (
 
 type stateFileIpsFunc func([]byte) (map[string]string, error)
 
-type LinuxValidator struct {
-	clientset   *kubernetes.Clientset
-	config      *rest.Config
-	namespace   string
-	cni         string
-	restartCase bool
+var linuxChecksMap = map[string][]check{
+	"cilium": {
+		{"cns", cnsStateFileIps, cnsLabelSelector, privilegedNamespace, cnsStateFileCmd},
+		{"cilium", ciliumStateFileIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd},
+		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
+	},
+	"cniv2": {
+		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
+	},
 }
 
 type CnsState struct {
@@ -64,79 +58,6 @@ type Address struct {
 	Addr string `json:"ipv4"`
 }
 
-func CreateLinuxValidator(ctx context.Context, clienset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool) (*LinuxValidator, error) {
-	// deploy privileged pod
-	privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedDaemonSetPath)
-	if err != nil {
-		return nil, errors.Wrap(err, "unable to parse daemonset")
-	}
-	daemonsetClient := clienset.AppsV1().DaemonSets(privilegedNamespace)
-	if err := k8sutils.MustCreateDaemonset(ctx, daemonsetClient, privilegedDaemonSet); err != nil {
-		return nil, errors.Wrap(err, "unable to create daemonset")
-	}
-	if err := k8sutils.WaitForPodsRunning(ctx, clienset, privilegedNamespace, privilegedLabelSelector); err != nil {
-		return nil, errors.Wrap(err, "error while waiting for pods to be running")
-	}
-
-	return &LinuxValidator{
-		clientset:   clienset,
-		config:      config,
-		namespace:   namespace,
-		cni:         cni,
-		restartCase: restartCase,
-	}, nil
-}
-
-// Todo: Based on cni version validate different state files
-func (v *LinuxValidator) ValidateStateFile(ctx context.Context) error {
-	checkSet := make(map[string][]check) // key is cni type, value is a list of check
-	// TODO: add cniv1 when adding Linux related test cases
-	checkSet["cilium"] = []check{
-		{"cns", cnsStateFileIps, cnsLabelSelector, privilegedNamespace, cnsStateFileCmd},
-		{"cilium", ciliumStateFileIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd},
-		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
-	}
-
-	checkSet["cniv2"] = []check{
-		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
-	}
-
-	for _, check := range checkSet[v.cni] {
-		err := v.validateIPs(ctx, check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (v *LinuxValidator) ValidateRestartNetwork(ctx context.Context) error {
-	nodes, err := k8sutils.GetNodeList(ctx, v.clientset)
-	if err != nil {
-		return errors.Wrapf(err, "failed to get node list")
-	}
-
-	for index := range nodes.Items {
-		// get the privileged pod
-		pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, privilegedNamespace, privilegedLabelSelector, nodes.Items[index].Name)
-		if err != nil {
-			return errors.Wrapf(err, "failed to get privileged pod")
-		}
-
-		privelegedPod := pod.Items[0]
-		// exec into the pod to get the state file
-		_, err = k8sutils.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privelegedPod.Name, restartNetworkCmd, v.config)
-		if err != nil {
-			return errors.Wrapf(err, "failed to exec into privileged pod")
-		}
-		err = k8sutils.WaitForPodsRunning(ctx, v.clientset, "", "")
-		if err != nil {
-			return errors.Wrapf(err, "failed to wait for pods running")
-		}
-	}
-	return nil
-}
-
 func cnsStateFileIps(result []byte) (map[string]string, error) {
 	var cnsResult CnsState
 	err := json.Unmarshal(result, &cnsResult)
@@ -188,43 +109,3 @@ func cnsCacheStateFileIps(result []byte) (map[string]string, error) {
 	}
 	return cnsPodIps, nil
 }
-
-func (v *LinuxValidator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
-	log.Printf("Validating %s state file", checkType)
-	nodes, err := k8sutils.GetNodeList(ctx, v.clientset)
-	if err != nil {
-		return errors.Wrapf(err, "failed to get node list")
-	}
-
-	for index := range nodes.Items {
-		// get the privileged pod
-		pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, namespace, labelSelector, nodes.Items[index].Name)
-		if err != nil {
-			return errors.Wrapf(err, "failed to get privileged pod")
-		}
-		podName := pod.Items[0].Name
-		// exec into the pod to get the state file
-		result, err := k8sutils.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config)
-		if err != nil {
-			return errors.Wrapf(err, "failed to exec into privileged pod")
-		}
-		filePodIps, err := stateFileIps(result)
-		if err != nil {
-			return errors.Wrapf(err, "failed to get pod ips from state file")
-		}
-		if len(filePodIps) == 0 && v.restartCase {
-			log.Printf("No pods found on node %s", nodes.Items[index].Name)
-			continue
-		}
-		// get the pod ips
-		podIps := getPodIPsWithoutNodeIP(ctx, v.clientset, nodes.Items[index])
-
-		check := compareIPs(filePodIps, podIps)
-
-		if !check {
-			return errors.Wrapf(errors.New("State file validation failed"), "for %s on node %s", checkType, nodes.Items[index].Name)
-		}
-	}
-	log.Printf("State file validation for %s passed", checkType)
-	return nil
-}
diff --git a/test/validate/validate.go b/test/validate/validate.go
new file mode 100644
index 0000000000..8abe8f349f
--- /dev/null
+++ b/test/validate/validate.go
@@ -0,0 +1,174 @@
+package validate
+
+import (
+	"context"
+	"log"
+
+	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
+	"github.com/pkg/errors"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/rest"
+)
+
+var privilegedDaemonSetPathMap = map[string]string{
+ "windows": "../manifests/load/privileged-daemonset-windows.yaml", + "linux": "../manifests/load/privileged-daemonset.yaml", +} + +var nodeSelectorMap = map[string]string{ + "windows": "kubernetes.io/os=windows", + "linux": "kubernetes.io/os=linux", +} + +const ( + privilegedLabelSelector = "app=privileged-daemonset" + privilegedNamespace = "kube-system" +) + +type Validator struct { + clientset *kubernetes.Clientset + config *rest.Config + checks []check + namespace string + cni string + restartCase bool + os string +} + +type check struct { + name string + stateFileIps func([]byte) (map[string]string, error) + podLabelSelector string + podNamespace string + cmd []string +} + +func CreateValidator(ctx context.Context, clienset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool, os string) (*Validator, error) { + // deploy privileged pod + privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedDaemonSetPathMap[os]) + if err != nil { + return nil, errors.Wrap(err, "unable to parse daemonset") + } + daemonsetClient := clienset.AppsV1().DaemonSets(privilegedNamespace) + if err := k8sutils.MustCreateDaemonset(ctx, daemonsetClient, privilegedDaemonSet); err != nil { + return nil, errors.Wrap(err, "unable to create daemonset") + } + if err := k8sutils.WaitForPodsRunning(ctx, clienset, privilegedNamespace, privilegedLabelSelector); err != nil { + return nil, errors.Wrap(err, "error while waiting for pods to be running") + } + + var checks []check + switch os { + case "windows": + checks = windowsChecksMap[cni] + case "linux": + checks = linuxChecksMap[cni] + default: + return nil, errors.Errorf("unsupported os: %s", os) + } + + return &Validator{ + clientset: clienset, + config: config, + namespace: namespace, + cni: cni, + restartCase: restartCase, + checks: checks, + os: os, + }, nil +} + +func (v *Validator) Validate(ctx context.Context) error { + log.Printf("Validating State File") + err := v.ValidateStateFile(ctx) + if err != nil { + return errors.Wrapf(err, "failed to validate state file") + } + + if v.os == "linux" { + // We are restarting the systmemd network and checking that the connectivity works after the restart. 
For more details: https://github.com/cilium/cilium/issues/18706 + log.Printf("Validating the restart network scenario") + err = v.ValidateRestartNetwork(ctx) + if err != nil { + return errors.Wrapf(err, "failed to validate restart network scenario") + } + } + return nil +} + +func (v *Validator) ValidateStateFile(ctx context.Context) error { + for _, check := range v.checks { + err := v.validateIPs(ctx, check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) + if err != nil { + return err + } + } + return nil +} + +func (v *Validator) ValidateRestartNetwork(ctx context.Context) error { + nodes, err := k8sutils.GetNodeList(ctx, v.clientset) + if err != nil { + return errors.Wrapf(err, "failed to get node list") + } + + for index := range nodes.Items { + // get the privileged pod + pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, privilegedNamespace, privilegedLabelSelector, nodes.Items[index].Name) + if err != nil { + return errors.Wrapf(err, "failed to get privileged pod") + } + + privelegedPod := pod.Items[0] + // exec into the pod to get the state file + _, err = k8sutils.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privelegedPod.Name, restartNetworkCmd, v.config) + if err != nil { + return errors.Wrapf(err, "failed to exec into privileged pod") + } + err = k8sutils.WaitForPodsRunning(ctx, v.clientset, "", "") + if err != nil { + return errors.Wrapf(err, "failed to wait for pods running") + } + } + return nil +} + +func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { + log.Printf("Validating %s state file", checkType) + nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os]) + if err != nil { + return errors.Wrapf(err, "failed to get node list") + } + + for index := range nodes.Items { + // get the privileged pod + pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, namespace, labelSelector, nodes.Items[index].Name) + if err != nil { + return errors.Wrapf(err, "failed to get privileged pod") + } + podName := pod.Items[0].Name + // exec into the pod to get the state file + result, err := k8sutils.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config) + if err != nil { + return errors.Wrapf(err, "failed to exec into privileged pod") + } + filePodIps, err := stateFileIps(result) + if err != nil { + return errors.Wrapf(err, "failed to get pod ips from state file") + } + if len(filePodIps) == 0 && v.restartCase { + log.Printf("No pods found on node %s", nodes.Items[index].Name) + continue + } + // get the pod ips + podIps := getPodIPsWithoutNodeIP(ctx, v.clientset, nodes.Items[index]) + + check := compareIPs(filePodIps, podIps) + + if !check { + return errors.Wrapf(errors.New("State file validation failed"), "for %s on node %s", checkType, nodes.Items[index].Name) + } + } + log.Printf("State file validation for %s passed", checkType) + return nil +} diff --git a/test/validate/windows_validate.go b/test/validate/windows_validate.go index 6ce997a173..0d15c0a245 100644 --- a/test/validate/windows_validate.go +++ b/test/validate/windows_validate.go @@ -1,20 +1,10 @@ package validate import ( - "context" "encoding/json" - "log" "net" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" "github.com/pkg/errors" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -const ( - privilegedWindowsDaemonSetPath = "../manifests/load/privileged-daemonset-windows.yaml" - windowsNodeSelector 
= "kubernetes.io/os=windows" ) var ( @@ -23,12 +13,15 @@ var ( azureVnetIpamCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet-ipam.json"} ) -type WindowsValidator struct { - clientset *kubernetes.Clientset - config *rest.Config - namespace string - cni string - restartCase bool +var windowsChecksMap = map[string][]check{ + "cniv1": { + {"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd}, + {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, + {"azure-vnet-ipam", azureVnetIpamIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamCmd}, + }, + "cniv2": { + {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, + }, } type HNSEndpoint struct { @@ -80,60 +73,6 @@ type AddressRecord struct { InUse bool } -type check struct { - name string - stateFileIps func([]byte) (map[string]string, error) - podLabelSelector string - podNamespace string - cmd []string -} - -func CreateWindowsValidator(ctx context.Context, clienset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool) (*WindowsValidator, error) { - // deploy privileged pod - privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedWindowsDaemonSetPath) - if err != nil { - return nil, errors.Wrap(err, "unable to parse daemonset") - } - daemonsetClient := clienset.AppsV1().DaemonSets(privilegedNamespace) - if err := k8sutils.MustCreateDaemonset(ctx, daemonsetClient, privilegedDaemonSet); err != nil { - return nil, errors.Wrap(err, "unable to create daemonset") - } - if err := k8sutils.WaitForPodsRunning(ctx, clienset, privilegedNamespace, privilegedLabelSelector); err != nil { - return nil, errors.Wrap(err, "error while waiting for pods to be running") - } - return &WindowsValidator{ - clientset: clienset, - config: config, - namespace: namespace, - cni: cni, - restartCase: restartCase, - }, nil -} - -func (v *WindowsValidator) ValidateStateFile(ctx context.Context) error { - checkSet := make(map[string][]check) // key is cni type, value is a list of check - - checkSet["cniv1"] = []check{ - {"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd}, - {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, - {"azure-vnet-ipam", azureVnetIpamIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamCmd}, - } - - checkSet["cniv2"] = []check{ - {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, - } - - // this is checking all IPs of the pods with the statefile - for _, check := range checkSet[v.cni] { - err := v.validateIPs(ctx, check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) - if err != nil { - return err - } - } - - return nil -} - func hnsStateFileIps(result []byte) (map[string]string, error) { var hnsResult []HNSEndpoint err := json.Unmarshal(result, &hnsResult) @@ -190,46 +129,3 @@ func azureVnetIpamIps(result []byte) (map[string]string, error) { } return azureVnetIpamPodIps, nil } - -func (v *WindowsValidator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { - log.Println("Validating ", checkType, " state file") - nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, v.clientset, windowsNodeSelector) - if err != nil { - return errors.Wrapf(err, "failed to get node list") - } - for index := range nodes.Items { - // get the privileged pod - pod, err := 
k8sutils.GetPodsByNode(ctx, v.clientset, namespace, labelSelector, nodes.Items[index].Name) - if err != nil { - return errors.Wrapf(err, "failed to get privileged pod") - } - podName := pod.Items[0].Name - // exec into the pod to get the state file - result, err := k8sutils.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config) - if err != nil { - return errors.Wrapf(err, "failed to exec into privileged pod") - } - filePodIps, err := stateFileIps(result) - if err != nil { - return errors.Wrapf(err, "failed to get pod ips from state file") - } - if len(filePodIps) == 0 && v.restartCase { - log.Printf("No pods found on node %s", nodes.Items[index].Name) - continue - } - // get the pod ips - podIps := getPodIPsWithoutNodeIP(ctx, v.clientset, nodes.Items[index]) - - check := compareIPs(filePodIps, podIps) - - if !check { - return errors.Wrapf(errors.New("State file validation failed"), "for %s on node %s", checkType, nodes.Items[index].Name) - } - } - log.Printf("State file validation for %s passed", checkType) - return nil -} - -func (v *WindowsValidator) ValidateRestartNetwork(context.Context) error { - return errors.New("not implemented") -}
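
Note on the refactor above: per-OS and per-CNI validation is now table-driven. Each entry in linuxChecksMap / windowsChecksMap pairs a state-file dump command with a parser, and the shared Validator.validateIPs loop runs every entry against every matching node, so adding coverage means adding a table entry rather than a new validator type. A minimal sketch of that extension point follows; the command, parser, state-file schema, and the Linux "cniv1" entry are hypothetical and are not part of this diff.

package validate

import "encoding/json"

// Hypothetical parser for an additional Linux state file. It only needs to
// satisfy the stateFileIpsFunc signature that every existing check uses.
func exampleStateFileIps(result []byte) (map[string]string, error) {
	// Stand-in schema: a flat JSON object, keyed the same way as the maps
	// returned by the existing parsers (see cnsStateFileIps for a real one).
	var ips map[string]string
	if err := json.Unmarshal(result, &ips); err != nil {
		return nil, err
	}
	return ips, nil
}

// Hypothetical command the privileged pod would run to dump that state file.
var exampleStateFileCmd = []string{"bash", "-c", "cat /var/run/example-state.json"}

// Wiring the check in is a single table entry; CreateValidator copies the
// slice for the requested os/cni pair and Validate runs it on every node.
func registerExampleCheck() {
	linuxChecksMap["cniv1"] = append(linuxChecksMap["cniv1"],
		check{"example", exampleStateFileIps, cnsLabelSelector, privilegedNamespace, exampleStateFileCmd},
	)
}

With an entry like this in place, nothing else would need to change for TestValidateState to pick it up, since CreateValidator selects the slice by os/cni and Validate iterates whatever checks were selected.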