From cb7d932735323a52b49802ef3c0218a3fbdff9d8 Mon Sep 17 00:00:00 2001
From: paulyufan2
Date: Wed, 21 Jun 2023 21:45:40 -0400
Subject: [PATCH 01/36] add Linux test cases for connectivity

---
 .../datapath/datapath_linux_test.go           | 238 ++++++++++++++++++
 .../manifests/datapath/linux-deployment.yaml  |  74 ++++++
 test/internal/datapath/datapath_linux.go      | 167 ++++++++++++
 test/internal/k8sutils/utils.go               |   1 -
 4 files changed, 479 insertions(+), 1 deletion(-)
 create mode 100644 test/integration/datapath/datapath_linux_test.go
 create mode 100644 test/integration/manifests/datapath/linux-deployment.yaml
 create mode 100644 test/internal/datapath/datapath_linux.go

diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go
new file mode 100644
index 0000000000..acf9cea2f1
--- /dev/null
+++ b/test/integration/datapath/datapath_linux_test.go
@@ -0,0 +1,238 @@
+//go:build connection
+
+package connection
+
+import (
+	"context"
+	"flag"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-container-networking/test/integration"
+	"github.com/Azure/azure-container-networking/test/integration/goldpinger"
+	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
+	"github.com/Azure/azure-container-networking/test/internal/retry"
+	"github.com/pkg/errors"
+	"github.com/stretchr/testify/require"
+
+	apiv1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	LinuxDeployYamlPath        = "../manifests/datapath/linux-deployment.yaml"
+	podLabelKey                = "app"
+	podCount                   = 2
+	nodepoolKey                = "agentpool"
+	maxRetryDelaySeconds       = 10
+	defaultTimeoutSeconds      = 120
+	defaultRetryDelaySeconds   = 1
+	goldpingerRetryCount       = 24
+	goldpingerDelayTimeSeconds = 5
+)
+
+var (
+	podPrefix        = flag.String("podName", "goldpinger", "Prefix for test pods")
+	podNamespace     = flag.String("namespace", "datapath-linux", "Namespace for test pods")
+	nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Node-Selector for pods")
+	defaultRetrier   = retry.Retrier{
+		Attempts: 10,
+		Delay:    defaultRetryDelaySeconds * time.Second,
+	}
+)
+
+/*
+This test assumes that you have the current credentials loaded in your default kubeconfig for a
+k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
+*** The expected nodepool name is nodepool1; if the nodepool has a different name, ensure that you change nodepoolSelector with:
+	-nodepoolSelector="yournodepoolname"
+
+To run the test use one of the following commands:
+go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -run ^TestDatapathLinux$ -tags=connection
+ or
+go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=npwina -tags=connection
+
+
+This test checks pod to pod, pod to node, and pod to internet for datapath connectivity.
+
+Timeout context is controlled by the -timeout flag.
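+
+Note: the goldpinger deployment used by this test references the "goldpinger-serviceaccount"
+service account, so the goldpinger RBAC manifests under test/integration/manifests/goldpinger
+(service account, cluster role, and cluster role binding) are expected to already exist in the cluster.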
+
+*/
+
+func TestDatapathLinux(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Create Clientset")
+	clientset, err := k8sutils.MustGetClientset()
+	if err != nil {
+		require.NoError(t, err, "could not get k8s clientset: %v", err)
+	}
+	t.Log("Get REST config")
+	restConfig := k8sutils.MustGetRestConfig(t)
+
+	t.Log("Create Label Selectors")
+
+	podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix)
+	nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector)
+
+	t.Log("Get Nodes")
+	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
+	if err != nil {
+		require.NoError(t, err, "could not get k8s node list: %v", err)
+	}
+
+	// Test Namespace
+	t.Log("Create Namespace")
+	err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
+	createPodFlag := !(apierrors.IsAlreadyExists(err))
+	t.Logf("%v", createPodFlag)
+
+	if createPodFlag {
+		t.Log("Creating Linux pods through deployment")
+		deployment, err := k8sutils.MustParseDeployment(LinuxDeployYamlPath)
+		if err != nil {
+			require.NoError(t, err)
+		}
+
+		// Fields for overwriting the existing deployment yaml.
+		// Defaults from flags will not change anything
+		deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
+		deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
+		deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
+		deployment.Name = *podPrefix
+		deployment.Namespace = *podNamespace
+
+		t.Logf("deployment Spec Template is %+v", deployment.Spec.Template)
+		deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
+		err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)
+		if err != nil {
+			require.NoError(t, err)
+		}
+		t.Logf("podNamespace is %s", *podNamespace)
+		t.Logf("podLabelSelector is %s", podLabelSelector)
+
+		t.Log("Waiting for pods to be in Running state")
+		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		if err != nil {
+			require.NoError(t, err)
+		}
+		t.Log("Successfully created customer Linux pods")
+	} else {
+		// The namespace already exists from a previous attempt
+		t.Log("Namespace already exists")
+
+		t.Log("Checking for pods to be in Running state")
+		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		if err != nil {
+			require.NoError(t, err)
+		}
+	}
+	t.Log("Checking Linux test environment")
+	for _, node := range nodes.Items {
+
+		pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
+		if err != nil {
+			require.NoError(t, err, "could not get pods by node: %v", err)
+		}
+		if len(pods.Items) <= 1 {
+			t.Logf("%s", node.Name)
+			require.NoError(t, errors.New("Less than 2 pods on node"))
+		}
+	}
+	t.Log("Linux test environment ready")
+
+	t.Run("Linux ping tests", func(t *testing.T) {
+		// Check goldpinger health
+		t.Run("all pods have IPs assigned", func(t *testing.T) {
+			podsClient := clientset.CoreV1().Pods(*podNamespace)
+
+			checkPodIPsFn := func() error {
+				podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
+				t.Logf("podList is %+v", podList)
+				if err != nil {
+					return err
+				}
+
+				if len(podList.Items) == 0 {
+					return errors.New("no pods scheduled")
+				}
+
+				for _, pod := range podList.Items {
+					if pod.Status.Phase == apiv1.PodPending {
+						return errors.New("some pods still pending")
+					}
+				}
+
+				for _, pod := range podList.Items {
+					if pod.Status.PodIP == "" {
+						return errors.New("a pod has not been allocated an IP")
+					}
+				}
+
+				
return nil + } + err := defaultRetrier.Do(ctx, checkPodIPsFn) + if err != nil { + t.Fatalf("not all pods were allocated IPs: %v", err) + } + t.Log("all pods have been allocated IPs") + }) + + t.Run("all linux pods can ping each other", func(t *testing.T) { + pfOpts := k8s.PortForwardingOpts{ + Namespace: "default", + LabelSelector: "type=goldpinger-pod", + LocalPort: 9090, + DestPort: 8080, + } + + pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) + if err != nil { + t.Fatal(err) + } + + portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) + defer cancel() + + portForwardFn := func() error { + err := pf.Forward(portForwardCtx) + if err != nil { + t.Logf("unable to start port forward: %v", err) + return err + } + return nil + } + if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { + t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err) + } + defer pf.Stop() + + gpClient := goldpinger.Client{Host: pf.Address()} + + clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + clusterCheckFn := func() error { + clusterState, err := gpClient.CheckAll(clusterCheckCtx) + if err != nil { + return err + } + + stats := goldpinger.ClusterStats(clusterState) + stats.PrintStats() + if stats.AllPingsHealthy() { + return nil + } + + return errors.New("not all pings are healthy") + } + retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second} + if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil { + t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err) + } + + t.Log("all pings successful!") + }) + }) +} diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml new file mode 100644 index 0000000000..4b029c28c3 --- /dev/null +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-deploy + namespace: datapath-linux +spec: + replicas: 4 + selector: + matchLabels: + app: goldpinger + template: + metadata: + labels: + app: goldpinger + spec: + serviceAccount: "goldpinger-serviceaccount" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: "docker.io/bloomberg/goldpinger:v3.0.0" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux diff --git a/test/internal/datapath/datapath_linux.go b/test/internal/datapath/datapath_linux.go new file mode 100644 index 0000000000..f32d4cda3f 
--- /dev/null +++ b/test/internal/datapath/datapath_linux.go @@ -0,0 +1,167 @@ +package datapath + +import ( + "context" + "fmt" + + "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" +) + +func LinuxPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { + logrus.Infof("Get Pods for Linux Node: %s", nodeName) + pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) + if err != nil { + logrus.Error(err) + return errors.Wrap(err, "k8s api call") + } + if len(pods.Items) <= 1 { + return errors.New("Less than 2 pods on node") + } + + // Get first pod on this node + firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) + } + logrus.Infof("First pod: %v %v", firstPod.Name, firstPod.Status.PodIP) + + // Get the second pod on this node + secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) + } + logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) + + // Ping the second pod from the first pod + return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) +} + +// func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName1, nodeName2, podNamespace, labelSelector string, rc *restclient.Config) error { +// logrus.Infof("Get Pods for Node 1: %s", nodeName1) +// // Node 1 +// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName1) +// if err != nil { +// logrus.Error(err) +// return errors.Wrap(err, "k8s api call") +// } +// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) +// } +// logrus.Infof("First pod: %v %v", firstPod.Name, firstPod.Status.PodIP) + +// logrus.Infof("Get Pods for Node 2: %s", nodeName2) +// // Node 2 +// pods, err = k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName2) +// if err != nil { +// logrus.Error(err) +// return errors.Wrap(err, "k8s api call") +// } +// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) +// } +// logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) + +// // Ping the second pod from the first pod located on different nodes +// return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) +// } + +// func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, nodeIP, podNamespace, labelSelector string, rc *restclient.Config) error { +// logrus.Infof("Get Pods by Node: %s %s", nodeName, nodeIP) +// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) +// if err != nil { +// logrus.Error(err) 
+// return errors.Wrap(err, "k8s api call") +// } +// if len(pods.Items) <= 1 { +// return errors.New("Less than 2 pods on node") +// } +// // Get first pod on this node +// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) +// } +// logrus.Infof("First pod: %v", firstPod.Name) + +// // Get the second pod on this node +// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) +// } +// logrus.Infof("Second pod: %v", secondPod.Name) + +// // Ping from pod to node +// resultOne := podTest(ctx, clientset, firstPod, []string{"ping", nodeIP}, rc, pingPassedWindows) +// resultTwo := podTest(ctx, clientset, secondPod, []string{"ping", nodeIP}, rc, pingPassedWindows) + +// if resultOne != nil { +// return resultOne +// } + +// if resultTwo != nil { +// return resultTwo +// } + +// return nil +// } + +// func WindowsPodToInternet(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { +// logrus.Infof("Get Pods by Node: %s", nodeName) +// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) +// if err != nil { +// logrus.Error(err) +// return errors.Wrap(err, "k8s api call") +// } +// if len(pods.Items) <= 1 { +// return errors.New("Less than 2 pods on node") +// } + +// // Get first pod on this node +// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) +// } +// logrus.Infof("First pod: %v", firstPod.Name) + +// // Get the second pod on this node +// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) +// if err != nil { +// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) +// } +// logrus.Infof("Second pod: %v", secondPod.Name) + +// resultOne := podTest(ctx, clientset, firstPod, []string{"powershell", "Invoke-WebRequest", "www.bing.com", "-UseBasicParsing"}, rc, webRequestPassedWindows) +// resultTwo := podTest(ctx, clientset, secondPod, []string{"powershell", "Invoke-WebRequest", "www.bing.com", "-UseBasicParsing"}, rc, webRequestPassedWindows) + +// if resultOne != nil { +// return resultOne +// } + +// if resultTwo != nil { +// return resultTwo +// } + +// return nil +// } + +// func webRequestPassedWindows(output string) error { +// const searchString = "200 OK" +// if strings.Contains(output, searchString) { +// return nil +// } +// return errors.Wrapf(errors.New("Output did not contain \"200 OK\""), "output was: %s", output) +// } + +// func pingPassedWindows(output string) error { +// const searchString = "0% loss" +// if strings.Contains(output, searchString) { +// return nil +// } +// return errors.Wrapf(errors.New("Ping did not contain\"0% loss\""), "output was: %s", output) +// } diff --git a/test/internal/k8sutils/utils.go b/test/internal/k8sutils/utils.go index 4174595751..21873da72a 100644 --- a/test/internal/k8sutils/utils.go +++ b/test/internal/k8sutils/utils.go @@ -71,7 +71,6 @@ func mustParseResource(path string, out interface{}) error { if err := yaml.NewYAMLOrJSONDecoder(f, 
0).Decode(out); err != nil { return err } - return err } From 60e384d2affc45bbb83de2e7dfe370312a58dd0d Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 14:21:29 -0400 Subject: [PATCH 02/36] pipeline ipv6 test cases --- ...h_win_test.go => datapath_windows_test.go} | 19 +++++-- .../manifests/load/privileged-daemonset.yaml | 12 ++++ .../manifests/noop-deployment-linux.yaml | 2 +- .../manifests/noop-deployment-windows.yaml | 2 +- .../{datapath_win.go => datapath_windows.go} | 57 ++++++++++++++++++- test/internal/k8sutils/utils_get.go | 6 +- test/validate/linux_validate.go | 20 +++---- test/validate/utils.go | 24 +++++++- test/validate/windows_validate.go | 34 +++++++---- 9 files changed, 142 insertions(+), 34 deletions(-) rename test/integration/datapath/{datapath_win_test.go => datapath_windows_test.go} (88%) rename test/internal/datapath/{datapath_win.go => datapath_windows.go} (80%) diff --git a/test/integration/datapath/datapath_win_test.go b/test/integration/datapath/datapath_windows_test.go similarity index 88% rename from test/integration/datapath/datapath_win_test.go rename to test/integration/datapath/datapath_windows_test.go index 054a60bb98..ccc8f82f19 100644 --- a/test/integration/datapath/datapath_win_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -36,9 +36,9 @@ k8s cluster with a windows nodepool consisting of at least 2 windows nodes. -nodepoolSelector="yournodepoolname" To run the test use one of the following commands: -go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection +go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection or -go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection +go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection This test checks pod to pod, pod to node, and pod to internet for datapath connectivity. @@ -110,7 +110,7 @@ func TestDatapathWin(t *testing.T) { require.NoError(t, err) } } - t.Log("Checking Windows test environment ") + t.Log("Checking Windows test environment") for _, node := range nodes.Items { pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) @@ -129,11 +129,13 @@ func TestDatapathWin(t *testing.T) { for _, node := range nodes.Items { t.Log("Windows ping tests (1)") nodeIP := "" + nodeIPv6 := "" for _, address := range node.Status.Addresses { if address.Type == "InternalIP" { nodeIP = address.Address - // Multiple addresses exist, break once Internal IP found. 
- // Cannot call directly + if net.ParseIP(address.Address).To16() != nil { + nodeIPv6 = address.Address + } break } } @@ -141,6 +143,13 @@ func TestDatapathWin(t *testing.T) { err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIP, *podNamespace, podLabelSelector, restConfig) require.NoError(t, err, "Windows pod to node, ping test failed with: %+v", err) t.Logf("Windows pod to node, passed for node: %s", node.Name) + + // windows ipv6 connectivity + if nodeIPv6 != "" { + err = datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIPv6, *podNamespace, podLabelSelector, restConfig) + require.NoError(t, err, "Windows pod to node, ipv6 ping test failed with: %+v", err) + t.Logf("Windows pod to node via ipv6, passed for node: %s", node.Name) + } } }) diff --git a/test/integration/manifests/load/privileged-daemonset.yaml b/test/integration/manifests/load/privileged-daemonset.yaml index 9bacdc4ebe..6448f56980 100644 --- a/test/integration/manifests/load/privileged-daemonset.yaml +++ b/test/integration/manifests/load/privileged-daemonset.yaml @@ -26,13 +26,25 @@ spec: volumeMounts: - mountPath: /var/run/azure-cns name: azure-cns + - mountPath: /var/run/azure-network + name: azure-network - mountPath: /host name: host-root + - mountPath: /var/run + name: azure-cns-noncilium volumes: - name: azure-cns hostPath: path: /var/run/azure-cns + - name: azure-network + hostPath: + path: /var/run/azure-network + - name: azure-cns-noncilium + hostPath: + path: /var/run - hostPath: path: / type: "" name: host-root + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/integration/manifests/noop-deployment-linux.yaml b/test/integration/manifests/noop-deployment-linux.yaml index 6b12793189..4d4acd89c2 100644 --- a/test/integration/manifests/noop-deployment-linux.yaml +++ b/test/integration/manifests/noop-deployment-linux.yaml @@ -20,4 +20,4 @@ spec: securityContext: privileged: true nodeSelector: - "kubernetes.io/os": linux + kubernetes.io/os: linux diff --git a/test/integration/manifests/noop-deployment-windows.yaml b/test/integration/manifests/noop-deployment-windows.yaml index 3b35f044dc..7d6f5ef035 100644 --- a/test/integration/manifests/noop-deployment-windows.yaml +++ b/test/integration/manifests/noop-deployment-windows.yaml @@ -20,4 +20,4 @@ spec: ports: - containerPort: 80 nodeSelector: - "kubernetes.io/os": windows + kubernetes.io/os: windows diff --git a/test/internal/datapath/datapath_win.go b/test/internal/datapath/datapath_windows.go similarity index 80% rename from test/internal/datapath/datapath_win.go rename to test/internal/datapath/datapath_windows.go index 54a317760b..d59bb53f69 100644 --- a/test/internal/datapath/datapath_win.go +++ b/test/internal/datapath/datapath_windows.go @@ -3,6 +3,7 @@ package datapath import ( "context" "fmt" + "net" "strings" "github.com/Azure/azure-container-networking/test/internal/k8sutils" @@ -14,6 +15,8 @@ import ( restclient "k8s.io/client-go/rest" ) +var ipv6PrefixPolicy = []string{"curl", "-6", "-I", "-v", "www.bing.com"} + func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error { logrus.Infof("podTest() - %v %v", srcPod.Name, cmd) output, err := k8sutils.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc) @@ -48,8 +51,27 @@ func WindowsPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes. 
} logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) + // ipv4 ping test // Ping the second pod from the first pod - return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) + resultOne := podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) + if resultOne != nil { + return resultOne + } + + // ipv6 ping test + // ipv6 Ping the second pod from the first pod + if len(secondPod.Status.PodIPs) > 1 { + for _, ip := range secondPod.Status.PodIPs { + if net.ParseIP(ip.IP).To16() != nil { + resultTwo := podTest(ctx, clientset, firstPod, []string{"ping", ip.IP}, rc, pingPassedWindows) + if resultTwo != nil { + return resultTwo + } + } + } + } + + return nil } func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName1, nodeName2, podNamespace, labelSelector string, rc *restclient.Config) error { @@ -80,7 +102,23 @@ func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes. logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) // Ping the second pod from the first pod located on different nodes - return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) + resultOne := podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) + if resultOne != nil { + return resultOne + } + + if len(secondPod.Status.PodIPs) > 1 { + for _, ip := range secondPod.Status.PodIPs { + if net.ParseIP(ip.IP).To16() != nil { + resultTwo := podTest(ctx, clientset, firstPod, []string{"ping ", ip.IP}, rc, pingPassedWindows) + if resultTwo != nil { + return resultTwo + } + } + } + } + + return nil } func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, nodeIP, podNamespace, labelSelector string, rc *restclient.Config) error { @@ -158,6 +196,21 @@ func WindowsPodToInternet(ctx context.Context, clientset *kubernetes.Clientset, return resultTwo } + // test Invoke-WebRequest an URL by IPv6 address on one pod + // command is: C:\inetpub\wwwroot>curl -6 -I -v www.bing.com + // then return * Trying [2620:1ec:c11::200]:80... 
+ // HTTP/1.1 200 OK + if len(secondPod.Status.PodIPs) > 1 { + for _, ip := range secondPod.Status.PodIPs { + if net.ParseIP(ip.IP).To16() != nil { + resultThree := podTest(ctx, clientset, secondPod, ipv6PrefixPolicy, rc, webRequestPassedWindows) + if resultThree != nil { + return resultThree + } + } + } + } + return nil } diff --git a/test/internal/k8sutils/utils_get.go b/test/internal/k8sutils/utils_get.go index 531ec38fce..6c1ff2b0e6 100644 --- a/test/internal/k8sutils/utils_get.go +++ b/test/internal/k8sutils/utils_get.go @@ -43,9 +43,11 @@ func GetPodsIpsByNode(ctx context.Context, clientset *kubernetes.Clientset, name if err != nil { return nil, err } - ips := make([]string, 0, len(pods.Items)) + ips := make([]string, 0, len(pods.Items)*2) //nolint for index := range pods.Items { - ips = append(ips, pods.Items[index].Status.PodIP) + for _, podIP := range pods.Items[index].Status.PodIPs { + ips = append(ips, podIP.IP) + } } return ips, nil } diff --git a/test/validate/linux_validate.go b/test/validate/linux_validate.go index d2839f4098..99fe2a0e3e 100644 --- a/test/validate/linux_validate.go +++ b/test/validate/linux_validate.go @@ -91,20 +91,20 @@ func (l *LinuxClient) CreateClient(ctx context.Context, clienset *kubernetes.Cli // Todo: Based on cni version validate different state files func (v *LinuxValidator) ValidateStateFile() error { - checks := []struct { - name string - stateFileIps func([]byte) (map[string]string, error) - podLabelSelector string - podNamespace string - cmd []string - }{ + checkSet := make(map[string][]check) // key is cni type, value is a list of check + // TODO: add cniv1 when adding Linux related test cases + checkSet["cilium"] = []check{ {"cns", cnsStateFileIps, cnsLabelSelector, privilegedNamespace, cnsStateFileCmd}, {"cilium", ciliumStateFileIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd}, {"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd}, } - for _, check := range checks { - err := v.validate(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) + checkSet["cniv2"] = []check{ + {"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd}, + } + + for _, check := range checkSet[v.cni] { + err := v.validateIPs(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) if err != nil { return err } @@ -191,7 +191,7 @@ func cnsCacheStateFileIps(result []byte) (map[string]string, error) { return cnsPodIps, nil } -func (v *LinuxValidator) validate(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { +func (v *LinuxValidator) validateIPs(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { log.Printf("Validating %s state file", checkType) nodes, err := k8sutils.GetNodeList(v.ctx, v.clientset) if err != nil { diff --git a/test/validate/utils.go b/test/validate/utils.go index 7180c7bc66..4c81fe145a 100644 --- a/test/validate/utils.go +++ b/test/validate/utils.go @@ -2,6 +2,7 @@ package validate import ( "context" + "reflect" "github.com/Azure/azure-container-networking/test/internal/k8sutils" corev1 "k8s.io/api/core/v1" @@ -29,11 +30,30 @@ func getPodIPsWithoutNodeIP(ctx context.Context, clientset *kubernetes.Clientset if err != nil { return podsIpsWithoutNodeIP } - nodeIP := node.Status.Addresses[0].Address + nodeIPs := make([]string, 0) + for _, address := range node.Status.Addresses { + if address.Type == 
corev1.NodeInternalIP { + nodeIPs = append(nodeIPs, address.Address) + } + } + for _, podIP := range podIPs { - if podIP != nodeIP { + if !contain(podIP, nodeIPs) { podsIpsWithoutNodeIP = append(podsIpsWithoutNodeIP, podIP) } } return podsIpsWithoutNodeIP } + +func contain(obj, target interface{}) bool { + targetValue := reflect.ValueOf(target) + switch reflect.TypeOf(target).Kind() { //nolint + case reflect.Slice, reflect.Array: + for i := 0; i < targetValue.Len(); i++ { + if targetValue.Index(i).Interface() == obj { + return true + } + } + } + return false +} diff --git a/test/validate/windows_validate.go b/test/validate/windows_validate.go index 9e54f61bef..2b01454ddd 100644 --- a/test/validate/windows_validate.go +++ b/test/validate/windows_validate.go @@ -19,6 +19,8 @@ const ( var ( hnsEndPpointCmd = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"} + hnsEndPointCmd = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"} + hnsNetworkCmd = []string{"powershell", "-c", "Get-HnsNetwork | ConvertTo-Json"} azureVnetCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet.json"} azureVnetIpamCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet-ipam.json"} ) @@ -78,6 +80,14 @@ type AddressRecord struct { InUse bool } +type check struct { + name string + stateFileIps func([]byte) (map[string]string, error) + podLabelSelector string + podNamespace string + cmd []string +} + func (w *WindowsClient) CreateClient(ctx context.Context, clienset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool) IValidator { // deploy privileged pod privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedWindowsDaemonSetPath) @@ -106,24 +116,26 @@ func (w *WindowsClient) CreateClient(ctx context.Context, clienset *kubernetes.C } func (v *WindowsValidator) ValidateStateFile() error { - checks := []struct { - name string - stateFileIps func([]byte) (map[string]string, error) - podLabelSelector string - podNamespace string - cmd []string - }{ - {"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPpointCmd}, + checkSet := make(map[string][]check) // key is cni type, value is a list of check + + checkSet["cniv1"] = []check{ + {"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd}, {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, {"azure-vnet-ipam", azureVnetIpamIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamCmd}, } - for _, check := range checks { - err := v.validate(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) + checkSet["cniv2"] = []check{ + {"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd}, + } + + // this is checking all IPs of the pods with the statefile + for _, check := range checkSet[v.cni] { + err := v.validateIPs(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector) if err != nil { return err } } + return nil } @@ -184,7 +196,7 @@ func azureVnetIpamIps(result []byte) (map[string]string, error) { return azureVnetIpamPodIps, nil } -func (v *WindowsValidator) validate(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { +func (v *WindowsValidator) validateIPs(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error { log.Println("Validating ", checkType, " state file") nodes, err := k8sutils.GetNodeListByLabelSelector(v.ctx, 
v.clientset, windowsNodeSelector) if err != nil { From 4344d7e388386b80b01ca7744104f4487f7947a3 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 14:29:20 -0400 Subject: [PATCH 03/36] add linux ipv6 yamls --- .../goldpinger/cluster-role-binding.yaml | 2 +- .../manifests/goldpinger/daemonset-ipv6.yaml | 82 +++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 test/integration/manifests/goldpinger/daemonset-ipv6.yaml diff --git a/test/integration/manifests/goldpinger/cluster-role-binding.yaml b/test/integration/manifests/goldpinger/cluster-role-binding.yaml index c7c22e9bb3..e18b186a12 100644 --- a/test/integration/manifests/goldpinger/cluster-role-binding.yaml +++ b/test/integration/manifests/goldpinger/cluster-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: goldpinger-clusterrolebinding diff --git a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml new file mode 100644 index 0000000000..f2eaa0de03 --- /dev/null +++ b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: goldpinger-host + namespace: default +spec: + selector: + matchLabels: + app: goldpinger + type: goldpinger-host + template: + metadata: + labels: + app: goldpinger + type: goldpinger-host + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + hostNetwork: true + serviceAccount: "goldpinger-serviceaccount" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger-vm + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: IP_VERSIONS + value: "6" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "2001:4860:4860::8888 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 \ No newline at end of file From dd85072ce1738ead7b5ec10545854e55716a36db Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 14:32:28 -0400 Subject: [PATCH 04/36] add deployment yamls --- .../datapath/datapath_linux_test.go | 238 ------------------ .../datapath/linux-deployment-ipv6.yaml | 88 +++++++ .../manifests/datapath/linux-deployment.yaml | 54 ++-- test/internal/datapath/datapath_linux.go | 167 ------------ 4 files changed, 121 insertions(+), 426 deletions(-) delete mode 100644 test/integration/datapath/datapath_linux_test.go create mode 100644 test/integration/manifests/datapath/linux-deployment-ipv6.yaml delete mode 100644 test/internal/datapath/datapath_linux.go diff --git 
a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go deleted file mode 100644 index acf9cea2f1..0000000000 --- a/test/integration/datapath/datapath_linux_test.go +++ /dev/null @@ -1,238 +0,0 @@ -//go:build connection - -package connection - -import ( - "context" - "flag" - "fmt" - "testing" - "time" - - "github.com/Azure/azure-container-networking/test/integration" - "github.com/Azure/azure-container-networking/test/integration/goldpinger" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" - "github.com/Azure/azure-container-networking/test/internal/retry" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - LinuxDeployYamlPath = "../manifests/datapath/linux-deployment.yaml" - podLabelKey = "app" - podCount = 2 - nodepoolKey = "agentpool" - maxRetryDelaySeconds = 10 - defaultTimeoutSeconds = 120 - defaultRetryDelaySeconds = 1 - goldpingerRetryCount = 24 - goldpingerDelayTimeSeconds = 5 -) - -var ( - podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods") - podNamespace = flag.String("namespace", "datapath-linux", "Namespace for test pods") - nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Node-Selector for pods") - defaultRetrier = retry.Retrier{ - Attempts: 10, - Delay: defaultRetryDelaySeconds * time.Second, - } -) - -/* -This test assumes that you have the current credentials loaded in your default kubeconfig for a -k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. -*** The expected nodepool name is npwin, if the nodepool has a diferent name ensure that you change nodepoolSelector with: - -nodepoolSelector="yournodepoolname" - -To run the test use one of the following commands: -go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection - or -go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=npwina -tags=connection - - -This test checks pod to pod, pod to node, and pod to internet for datapath connectivity. - -Timeout context is controled by the -timeout flag. - -*/ - -func TestDatapathLinux(t *testing.T) { - ctx := context.Background() - - t.Log("Create Clientset") - clientset, err := k8sutils.MustGetClientset() - if err != nil { - require.NoError(t, err, "could not get k8s clientset: %v", err) - } - t.Log("Get REST config") - restConfig := k8sutils.MustGetRestConfig(t) - - t.Log("Create Label Selectors") - - podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix) - nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) - - t.Log("Get Nodes") - nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) - if err != nil { - require.NoError(t, err, "could not get k8s node list: %v", err) - } - - // Test Namespace - t.Log("Create Namespace") - err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) - createPodFlag := !(apierrors.IsAlreadyExists(err)) - t.Logf("%v", createPodFlag) - - if createPodFlag { - t.Log("Creating Linux pods through deployment") - deployment, err := k8sutils.MustParseDeployment(LinuxDeployYamlPath) - if err != nil { - require.NoError(t, err) - } - - // Fields for overwritting existing deployment yaml. 
- // Defaults from flags will not change anything - deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix - deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix - deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector - deployment.Name = *podPrefix - deployment.Namespace = *podNamespace - - t.Logf("deployment Spec Template is %+v", deployment.Spec.Template) - deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) - if err != nil { - require.NoError(t, err) - } - t.Logf("podNamespace is %s", *podNamespace) - t.Logf("podLabelSelector is %s", podLabelSelector) - - t.Log("Waiting for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - require.NoError(t, err) - } - t.Log("Successfully created customer linux pods") - } else { - // Checks namespace already exists from previous attempt - t.Log("Namespace already exists") - - t.Log("Checking for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - require.NoError(t, err) - } - } - t.Log("Checking Linux test environment") - for _, node := range nodes.Items { - - pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) - if err != nil { - require.NoError(t, err, "could not get k8s clientset: %v", err) - } - if len(pods.Items) <= 1 { - t.Logf("%s", node.Name) - require.NoError(t, errors.New("Less than 2 pods on node")) - } - } - t.Log("Linux test environment ready") - - t.Run("Linux ping tests", func(t *testing.T) { - // Check goldpinger health - t.Run("all pods have IPs assigned", func(t *testing.T) { - podsClient := clientset.CoreV1().Pods(*podNamespace) - - checkPodIPsFn := func() error { - podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"}) - t.Logf("podList is %+v", podList) - if err != nil { - return err - } - - if len(podList.Items) == 0 { - return errors.New("no pods scheduled") - } - - for _, pod := range podList.Items { - if pod.Status.Phase == apiv1.PodPending { - return errors.New("some pods still pending") - } - } - - for _, pod := range podList.Items { - if pod.Status.PodIP == "" { - return errors.New("a pod has not been allocated an IP") - } - } - - return nil - } - err := defaultRetrier.Do(ctx, checkPodIPsFn) - if err != nil { - t.Fatalf("not all pods were allocated IPs: %v", err) - } - t.Log("all pods have been allocated IPs") - }) - - t.Run("all linux pods can ping each other", func(t *testing.T) { - pfOpts := k8s.PortForwardingOpts{ - Namespace: "default", - LabelSelector: "type=goldpinger-pod", - LocalPort: 9090, - DestPort: 8080, - } - - pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) - if err != nil { - t.Fatal(err) - } - - portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) - defer cancel() - - portForwardFn := func() error { - err := pf.Forward(portForwardCtx) - if err != nil { - t.Logf("unable to start port forward: %v", err) - return err - } - return nil - } - if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { - t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err) - } - defer pf.Stop() - - gpClient := goldpinger.Client{Host: pf.Address()} - - clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) - defer cancel() - clusterCheckFn := func() error { - 
clusterState, err := gpClient.CheckAll(clusterCheckCtx) - if err != nil { - return err - } - - stats := goldpinger.ClusterStats(clusterState) - stats.PrintStats() - if stats.AllPingsHealthy() { - return nil - } - - return errors.New("not all pings are healthy") - } - retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second} - if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil { - t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err) - } - - t.Log("all pings successful!") - }) - }) -} diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml new file mode 100644 index 0000000000..53f4ec5d54 --- /dev/null +++ b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-deploy + namespace: default +spec: + replicas: 4 + selector: + matchLabels: + app: goldpinger + template: + metadata: + labels: + app: goldpinger + spec: + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: IP_VERSIONS + value: "6" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "2001:4860:4860::8888 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml index 4b029c28c3..2ea74ecfcc 100644 --- a/test/integration/manifests/datapath/linux-deployment.yaml +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: goldpinger-deploy - namespace: datapath-linux + namespace: default spec: replicas: 4 selector: @@ -13,23 +13,6 @@ spec: labels: app: goldpinger spec: - serviceAccount: "goldpinger-serviceaccount" - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - goldpinger - topologyKey: "kubernetes.io/hostname" containers: - name: goldpinger env: @@ -37,6 +20,14 
@@ spec: value: "0.0.0.0" - name: PORT value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" - name: HOSTNAME valueFrom: fieldRef: @@ -45,10 +36,31 @@ spec: valueFrom: fieldRef: fieldPath: status.podIP - image: "docker.io/bloomberg/goldpinger:v3.0.0" + - name: HOSTS_TO_RESOLVE + value: "1.1.1.1 8.8.8.8 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule securityContext: allowPrivilegeEscalation: false - readOnlyRootFilesystem: true + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" resources: limits: memory: 80Mi @@ -71,4 +83,4 @@ spec: initialDelaySeconds: 5 periodSeconds: 5 nodeSelector: - kubernetes.io/os: linux + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/internal/datapath/datapath_linux.go b/test/internal/datapath/datapath_linux.go deleted file mode 100644 index f32d4cda3f..0000000000 --- a/test/internal/datapath/datapath_linux.go +++ /dev/null @@ -1,167 +0,0 @@ -package datapath - -import ( - "context" - "fmt" - - "github.com/Azure/azure-container-networking/test/internal/k8sutils" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" -) - -func LinuxPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { - logrus.Infof("Get Pods for Linux Node: %s", nodeName) - pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) - if err != nil { - logrus.Error(err) - return errors.Wrap(err, "k8s api call") - } - if len(pods.Items) <= 1 { - return errors.New("Less than 2 pods on node") - } - - // Get first pod on this node - firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) - } - logrus.Infof("First pod: %v %v", firstPod.Name, firstPod.Status.PodIP) - - // Get the second pod on this node - secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) - } - logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) - - // Ping the second pod from the first pod - return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) -} - -// func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName1, nodeName2, podNamespace, labelSelector string, rc *restclient.Config) error { -// logrus.Infof("Get Pods for Node 1: %s", nodeName1) -// // Node 1 -// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName1) -// if err != nil { -// logrus.Error(err) -// return errors.Wrap(err, "k8s api call") -// } -// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, 
pods.Items[0].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) -// } -// logrus.Infof("First pod: %v %v", firstPod.Name, firstPod.Status.PodIP) - -// logrus.Infof("Get Pods for Node 2: %s", nodeName2) -// // Node 2 -// pods, err = k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName2) -// if err != nil { -// logrus.Error(err) -// return errors.Wrap(err, "k8s api call") -// } -// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) -// } -// logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP) - -// // Ping the second pod from the first pod located on different nodes -// return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows) -// } - -// func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, nodeIP, podNamespace, labelSelector string, rc *restclient.Config) error { -// logrus.Infof("Get Pods by Node: %s %s", nodeName, nodeIP) -// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) -// if err != nil { -// logrus.Error(err) -// return errors.Wrap(err, "k8s api call") -// } -// if len(pods.Items) <= 1 { -// return errors.New("Less than 2 pods on node") -// } -// // Get first pod on this node -// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) -// } -// logrus.Infof("First pod: %v", firstPod.Name) - -// // Get the second pod on this node -// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", secondPod.Name, err)) -// } -// logrus.Infof("Second pod: %v", secondPod.Name) - -// // Ping from pod to node -// resultOne := podTest(ctx, clientset, firstPod, []string{"ping", nodeIP}, rc, pingPassedWindows) -// resultTwo := podTest(ctx, clientset, secondPod, []string{"ping", nodeIP}, rc, pingPassedWindows) - -// if resultOne != nil { -// return resultOne -// } - -// if resultTwo != nil { -// return resultTwo -// } - -// return nil -// } - -// func WindowsPodToInternet(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { -// logrus.Infof("Get Pods by Node: %s", nodeName) -// pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) -// if err != nil { -// logrus.Error(err) -// return errors.Wrap(err, "k8s api call") -// } -// if len(pods.Items) <= 1 { -// return errors.New("Less than 2 pods on node") -// } - -// // Get first pod on this node -// firstPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[0].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", firstPod.Name, err)) -// } -// logrus.Infof("First pod: %v", firstPod.Name) - -// // Get the second pod on this node -// secondPod, err := clientset.CoreV1().Pods(podNamespace).Get(ctx, pods.Items[1].Name, metav1.GetOptions{}) -// if err != nil { -// return errors.Wrap(err, fmt.Sprintf("Getting pod %s failed with %v", 
secondPod.Name, err)) -// } -// logrus.Infof("Second pod: %v", secondPod.Name) - -// resultOne := podTest(ctx, clientset, firstPod, []string{"powershell", "Invoke-WebRequest", "www.bing.com", "-UseBasicParsing"}, rc, webRequestPassedWindows) -// resultTwo := podTest(ctx, clientset, secondPod, []string{"powershell", "Invoke-WebRequest", "www.bing.com", "-UseBasicParsing"}, rc, webRequestPassedWindows) - -// if resultOne != nil { -// return resultOne -// } - -// if resultTwo != nil { -// return resultTwo -// } - -// return nil -// } - -// func webRequestPassedWindows(output string) error { -// const searchString = "200 OK" -// if strings.Contains(output, searchString) { -// return nil -// } -// return errors.Wrapf(errors.New("Output did not contain \"200 OK\""), "output was: %s", output) -// } - -// func pingPassedWindows(output string) error { -// const searchString = "0% loss" -// if strings.Contains(output, searchString) { -// return nil -// } -// return errors.Wrapf(errors.New("Ping did not contain\"0% loss\""), "output was: %s", output) -// } From 341ac63aef931fcacd515b4a6efcc490824163eb Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 14:36:02 -0400 Subject: [PATCH 05/36] remove duplicated linux deloyment files --- .../datapath/linux-deployment-ipv6.yaml | 88 ------------------- .../manifests/datapath/linux-deployment.yaml | 86 ------------------ 2 files changed, 174 deletions(-) delete mode 100644 test/integration/manifests/datapath/linux-deployment-ipv6.yaml delete mode 100644 test/integration/manifests/datapath/linux-deployment.yaml diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml deleted file mode 100644 index 53f4ec5d54..0000000000 --- a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: goldpinger-deploy - namespace: default -spec: - replicas: 4 - selector: - matchLabels: - app: goldpinger - template: - metadata: - labels: - app: goldpinger - spec: - containers: - - name: goldpinger - env: - - name: HOST - value: "0.0.0.0" - - name: PORT - value: "8080" - - name: PING_TIMEOUT - value: "10s" - - name: CHECK_TIMEOUT - value: "20s" - - name: CHECK_ALL_TIMEOUT - value: "20s" - - name: DNS_TARGETS_TIMEOUT - value: "10s" - - name: IP_VERSIONS - value: "6" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: HOSTS_TO_RESOLVE - value: "2001:4860:4860::8888 www.bing.com" - image: "docker.io/bloomberg/goldpinger:v3.7.0" - serviceAccount: goldpinger-serviceaccount - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - securityContext: - allowPrivilegeEscalation: false - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - goldpinger - topologyKey: "kubernetes.io/hostname" - resources: - limits: - memory: 80Mi - requests: - cpu: 1m - memory: 40Mi - ports: - - containerPort: 8080 - name: http - readinessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - nodeSelector: - kubernetes.io/os: linux \ No newline at end of 
file diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml deleted file mode 100644 index 2ea74ecfcc..0000000000 --- a/test/integration/manifests/datapath/linux-deployment.yaml +++ /dev/null @@ -1,86 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: goldpinger-deploy - namespace: default -spec: - replicas: 4 - selector: - matchLabels: - app: goldpinger - template: - metadata: - labels: - app: goldpinger - spec: - containers: - - name: goldpinger - env: - - name: HOST - value: "0.0.0.0" - - name: PORT - value: "8080" - - name: PING_TIMEOUT - value: "10s" - - name: CHECK_TIMEOUT - value: "20s" - - name: CHECK_ALL_TIMEOUT - value: "20s" - - name: DNS_TARGETS_TIMEOUT - value: "10s" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: HOSTS_TO_RESOLVE - value: "1.1.1.1 8.8.8.8 www.bing.com" - image: "docker.io/bloomberg/goldpinger:v3.7.0" - serviceAccount: goldpinger-serviceaccount - tolerations: - - key: node-role.kubernetes.io/master - effect: NoSchedule - securityContext: - allowPrivilegeEscalation: false - securityContext: - runAsNonRoot: true - runAsUser: 1000 - fsGroup: 2000 - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - goldpinger - topologyKey: "kubernetes.io/hostname" - resources: - limits: - memory: 80Mi - requests: - cpu: 1m - memory: 40Mi - ports: - - containerPort: 8080 - name: http - readinessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - livenessProbe: - httpGet: - path: /healthz - port: 8080 - initialDelaySeconds: 5 - periodSeconds: 5 - nodeSelector: - kubernetes.io/os: linux \ No newline at end of file From 297b3f4bda722bbe67a9c8100e01b695493a2874 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 14:48:17 -0400 Subject: [PATCH 06/36] add linux datapath --- .../datapath/dapapath_linux_test.go | 325 ++++++++++++++++++ test/integration/goldpinger/client.go | 1 + .../datapath/linux-deployment-ipv6.yaml | 88 +++++ .../manifests/datapath/linux-deployment.yaml | 86 +++++ 4 files changed, 500 insertions(+) create mode 100644 test/integration/datapath/dapapath_linux_test.go create mode 100644 test/integration/manifests/datapath/linux-deployment-ipv6.yaml create mode 100644 test/integration/manifests/datapath/linux-deployment.yaml diff --git a/test/integration/datapath/dapapath_linux_test.go b/test/integration/datapath/dapapath_linux_test.go new file mode 100644 index 0000000000..a0a4ab9a38 --- /dev/null +++ b/test/integration/datapath/dapapath_linux_test.go @@ -0,0 +1,325 @@ +//go:build connection + +package connection + +import ( + "context" + "flag" + "fmt" + "net" + "os" + "testing" + "time" + + "github.com/Azure/azure-container-networking/test/integration" + "github.com/Azure/azure-container-networking/test/integration/goldpinger" + k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/Azure/azure-container-networking/test/internal/retry" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + appsv1 "k8s.io/api/apps/v1" + apiv1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml" + 
LinuxDeployIPv6 = "../manifests/datapath/linux-deployment-ipv6.yaml" + podLabelKey = "app" + podCount = 2 + nodepoolKey = "agentpool" + maxRetryDelaySeconds = 10 + defaultTimeoutSeconds = 120 + defaultRetryDelaySeconds = 1 + goldpingerRetryCount = 24 + goldpingerDelayTimeSeconds = 5 + gpFolder = "../manifests/goldpinger" + gpClusterRolePath = gpFolder + "/cluster-role.yaml" + gpClusterRoleBindingPath = gpFolder + "/cluster-role-binding.yaml" + gpServiceAccountPath = gpFolder + "/service-account.yaml" + gpDaemonset = gpFolder + "/daemonset.yaml" + gpDaemonsetIPv6 = gpFolder + "/daemonset-ipv6.yaml" + gpDeployment = gpFolder + "/deployment.yaml" +) + +var ( + podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods") + podNamespace = flag.String("namespace", "default", "Namespace for test pods") + nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Node-Selector for pods") + testProfile = flag.String("testName", LinuxDeployIPV4, "Linux datapath test profile") + defaultRetrier = retry.Retrier{ + Attempts: 10, + Delay: defaultRetryDelaySeconds * time.Second, + } +) + +/* +This test assumes that you have the current credentials loaded in your default kubeconfig for a +k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. +*** The expected nodepool name is npwin, if the nodepool has a diferent name ensure that you change nodepoolSelector with: + -nodepoolSelector="yournodepoolname" + +To run the test use one of the following commands: +go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration + or +go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=aks-pool1 -tags=connection,integration + + +This test checks pod to pod, pod to node, pod to Internet check + +Timeout context is controled by the -timeout flag. 
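+
+A rough time budget implied by the constants above (an estimate, not a guarantee):
+the goldpinger health loop polls for up to goldpingerRetryCount * goldpingerDelayTimeSeconds
+= 24 * 5 = 120 seconds, and establishing the port forward can itself retry for up to
+defaultTimeoutSeconds = 120 seconds, so a slow run can exceed the suggested -timeout 3m;
+raise that flag value if the harness kills the test mid-check.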
+ +*/ + +// return podLabelSelector and nodeLabelSelector +func createLabelSelectors() (string, string) { + return fmt.Sprintf("%s=%s", podLabelKey, *podPrefix), fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) +} + +func setupLinuxEnvironment(t *testing.T) { + ctx := context.Background() + + t.Log("Create Clientset") + clientset, err := k8sutils.MustGetClientset() + if err != nil { + require.NoError(t, err, "could not get k8s clientset: %v", err) + } + + t.Log("Create Label Selectors") + podLabelSelector, nodeLabelSelector := createLabelSelectors() + + t.Log("Get Nodes") + nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + if err != nil { + require.NoError(t, err, "could not get k8s node list: %v", err) + } + + createPodFlag := !(apierrors.IsAlreadyExists(err)) + t.Logf("%v", createPodFlag) + + if createPodFlag { + var daemonset appsv1.DaemonSet + t.Log("Creating Linux pods through deployment") + deployment, err := k8sutils.MustParseDeployment(*testProfile) + if err != nil { + require.NoError(t, err) + } + + if *testProfile == LinuxDeployIPV4 { + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) + if err != nil { + t.Fatal(err) + } + } else { + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) + if err != nil { + t.Fatal(err) + } + } + + rbacCleanUpFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + if err != nil { + t.Log(os.Getwd()) + t.Fatal(err) + } + + // Fields for overwritting existing deployment yaml. + // Defaults from flags will not change anything + deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix + deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix + deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector + deployment.Name = *podPrefix + deployment.Namespace = *podNamespace + + deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) + err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + if err != nil { + require.NoError(t, err) + } + + daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) + err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + if err != nil { + t.Fatal(err) + } + + t.Cleanup(func() { + t.Log("cleaning up resources") + rbacCleanUpFn() + + if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { + t.Log(err) + } + + if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { + t.Log(err) + } + }) + + t.Log("Waiting for pods to be running state") + err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + require.NoError(t, err) + } + t.Log("Successfully created customer linux pods") + } else { + t.Log("Checking for pods to be running state") + err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + require.NoError(t, err) + } + } + + t.Log("Checking Linux test environment") + for _, node := range nodes.Items { + pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) + if err != nil { + require.NoError(t, err, "could not get k8s clientset: %v", err) + } + if len(pods.Items) <= 1 { + t.Logf("%s", node.Name) + require.NoError(t, errors.New("Less than 2 pods on node")) + } + + } + t.Log("Linux test environment ready") +} + +func TestDatapathLinux(t *testing.T) { + ctx := context.Background() + + t.Log("Get REST config") + 
restConfig := k8sutils.MustGetRestConfig(t) + + t.Log("Create Clientset") + clientset, _ := k8sutils.MustGetClientset() + + setupLinuxEnvironment(t) + podLabelSelector, _ := createLabelSelectors() + + t.Run("Linux ping tests", func(t *testing.T) { + // Check goldpinger health + t.Run("all pods have IPs assigned", func(t *testing.T) { + podsClient := clientset.CoreV1().Pods(*podNamespace) + + checkPodIPsFn := func() error { + podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"}) + if err != nil { + return err + } + + if len(podList.Items) == 0 { + return errors.New("no pods scheduled") + } + + for _, pod := range podList.Items { + if pod.Status.Phase == apiv1.PodPending { + return errors.New("some pods still pending") + } + } + + for _, pod := range podList.Items { + if pod.Status.PodIP == "" { + return errors.New("a pod has not been allocated an IP") + } + } + + return nil + } + err := defaultRetrier.Do(ctx, checkPodIPsFn) + if err != nil { + t.Fatalf("not all pods were allocated IPs: %v", err) + } + t.Log("all pods have been allocated IPs") + }) + + // TODO: avoid using yaml file path to control test case + if *testProfile == LinuxDeployIPv6 { + t.Run("Linux dualstack overlay tests", func(t *testing.T) { + t.Run("test dualstack overlay", func(t *testing.T) { + podsClient := clientset.CoreV1().Pods(*podNamespace) + + checkPodIPsFn := func() error { + podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"}) + if err != nil { + return err + } + + for _, pod := range podList.Items { + podIPs := pod.Status.PodIPs + if len(podIPs) < 2 { + return errors.New("a pod only gets one IP") + } + if net.ParseIP(podIPs[0].IP).To4() == nil || net.ParseIP(podIPs[1].IP).To16() == nil { + return errors.New("a pod does not have both ipv4 and ipv6 address") + } + } + return nil + } + err := defaultRetrier.Do(ctx, checkPodIPsFn) + if err != nil { + t.Fatalf("dualstack overlay pod properties check is failed due to: %v", err) + } + + t.Log("all dualstack linux pods properties have been verified") + }) + }) + } + + t.Run("all linux pods can ping each other", func(t *testing.T) { + clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) + defer cancel() + + pfOpts := k8s.PortForwardingOpts{ + Namespace: *podNamespace, + LabelSelector: podLabelSelector, + LocalPort: 9090, + DestPort: 8080, + } + + pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts) + if err != nil { + t.Fatal(err) + } + + portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second) + defer cancel() + + portForwardFn := func() error { + err := pf.Forward(portForwardCtx) + if err != nil { + t.Logf("unable to start port forward: %v", err) + return err + } + return nil + } + if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { + t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err) + } + defer pf.Stop() + + gpClient := goldpinger.Client{Host: pf.Address()} + clusterCheckFn := func() error { + clusterState, err := gpClient.CheckAll(clusterCheckCtx) + if err != nil { + return err + } + stats := goldpinger.ClusterStats(clusterState) + stats.PrintStats() + if stats.AllPingsHealthy() { + return nil + } + + return errors.New("not all pings are healthy") + } + retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second} + if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil { + t.Fatalf("goldpinger pods network health could not reach healthy state 
after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err) + } + + t.Log("all pings successful!") + }) + }) +} diff --git a/test/integration/goldpinger/client.go b/test/integration/goldpinger/client.go index 49b29d9686..dac4149ced 100644 --- a/test/integration/goldpinger/client.go +++ b/test/integration/goldpinger/client.go @@ -1,3 +1,4 @@ +//go:build integration // +build integration package goldpinger diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml new file mode 100644 index 0000000000..53f4ec5d54 --- /dev/null +++ b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml @@ -0,0 +1,88 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-deploy + namespace: default +spec: + replicas: 4 + selector: + matchLabels: + app: goldpinger + template: + metadata: + labels: + app: goldpinger + spec: + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: IP_VERSIONS + value: "6" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "2001:4860:4860::8888 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml new file mode 100644 index 0000000000..2ea74ecfcc --- /dev/null +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-deploy + namespace: default +spec: + replicas: 4 + selector: + matchLabels: + app: goldpinger + template: + metadata: + labels: + app: goldpinger + spec: + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "1.1.1.1 8.8.8.8 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + 
securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file From b15314a6968551f43bd64fff7dcc33a7f3b181d7 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 16:08:56 -0400 Subject: [PATCH 07/36] add windows test --- test/integration/datapath/datapath_windows_test.go | 6 ++++-- test/validate/windows_validate.go | 2 -- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index ccc8f82f19..76f1197215 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -59,8 +59,7 @@ func TestDatapathWin(t *testing.T) { restConfig := k8sutils.MustGetRestConfig(t) t.Log("Create Label Selectors") - podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix) - nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) + podLabelSelector, nodeLabelSelector := createLabelSelectors() t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) @@ -71,6 +70,9 @@ func TestDatapathWin(t *testing.T) { // Test Namespace t.Log("Create Namespace") err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) + if err != nil { + require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err) + } createPodFlag := !(apierrors.IsAlreadyExists(err)) if createPodFlag { diff --git a/test/validate/windows_validate.go b/test/validate/windows_validate.go index 2b01454ddd..ab9a0f2aed 100644 --- a/test/validate/windows_validate.go +++ b/test/validate/windows_validate.go @@ -18,9 +18,7 @@ const ( ) var ( - hnsEndPpointCmd = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"} hnsEndPointCmd = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"} - hnsNetworkCmd = []string{"powershell", "-c", "Get-HnsNetwork | ConvertTo-Json"} azureVnetCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet.json"} azureVnetIpamCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet-ipam.json"} ) From e9faea0f2f726913cbc20857f6ebc34319f79954 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 18:55:52 -0400 Subject: [PATCH 08/36] change datapath windows file name --- test/internal/datapath/{datapath_windows.go => datapath_win.go} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/internal/datapath/{datapath_windows.go => datapath_win.go} (100%) diff --git a/test/internal/datapath/datapath_windows.go b/test/internal/datapath/datapath_win.go similarity index 100% rename from test/internal/datapath/datapath_windows.go rename to test/internal/datapath/datapath_win.go From 7e634a0c109ea30881461ab1b93f3618c18eccef Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 19:09:12 -0400 Subject: [PATCH 09/36] fix 
datapath windows test --- test/integration/datapath/datapath_windows_test.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index 76f1197215..c429d1a95f 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -5,7 +5,7 @@ package connection import ( "context" "flag" - "fmt" + "net" "testing" "github.com/Azure/azure-container-networking/test/internal/datapath" @@ -59,7 +59,8 @@ func TestDatapathWin(t *testing.T) { restConfig := k8sutils.MustGetRestConfig(t) t.Log("Create Label Selectors") - podLabelSelector, nodeLabelSelector := createLabelSelectors() + podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix) + nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) From d5647454cef58b7d1d7656f4ac831978fcf95fbe Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Fri, 21 Jul 2023 19:09:33 -0400 Subject: [PATCH 10/36] fix datapath windows test --- test/integration/datapath/datapath_windows_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index c429d1a95f..f959daf645 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -5,6 +5,7 @@ package connection import ( "context" "flag" + "fmt" "net" "testing" From 004e2d7022ab57bc7c4f1a6d07ac69d1a5c69c65 Mon Sep 17 00:00:00 2001 From: Paul Yu <129891899+paulyufan2@users.noreply.github.com> Date: Mon, 24 Jul 2023 15:21:13 -0400 Subject: [PATCH 11/36] scripts to cleanup ovs bridge and ovs leaked rules (#2066) --- hack/scripts/ovs_scripts/README.md | 20 +++++ .../scripts/ovs_scripts/remove_ovs_bridges.py | 85 +++++++++++++++++++ .../ovs_scripts/remove_ovs_leaked_rules.py | 43 ++++++++++ 3 files changed, 148 insertions(+) create mode 100644 hack/scripts/ovs_scripts/README.md create mode 100644 hack/scripts/ovs_scripts/remove_ovs_bridges.py create mode 100644 hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py diff --git a/hack/scripts/ovs_scripts/README.md b/hack/scripts/ovs_scripts/README.md new file mode 100644 index 0000000000..89f6632e80 --- /dev/null +++ b/hack/scripts/ovs_scripts/README.md @@ -0,0 +1,20 @@ +# ovs_scripts + +`remove_ovs_bridges.py` script is for removing ovs switch(azure0) and the and openflow rules configured with it +ssh connection will not be lost when running script +It will get all existing ovs bridges and delete them and then delete CNI state file +and all interfaces starting with `az` that are used for supporting apipa connectivity. After that, +it will bring back VM to original state with eth0 as primary interface and +check if Linux VM internet connectivity is still working. + +`remove_ovs_leaked_rules.py` script is for removeing all leaked ovs rules +It will check ovs flow dumps and filter which ports are being used. Then delete these ovs rules that +are not associated with used ports. 
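+
+A worked example of the filtering (port numbers invented for illustration): if
+`ovs-dpctl show` lists only ports 1 and 2 as in use, but `ovs-ofctl dump-flows azure0`
+still prints a rule matching `in_port=7`, then port 7 is treated as leaked and its
+rules are removed with `ovs-ofctl del-flows azure0 ip,in_port=7`.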
+ +To run these script, clone scripts to Linux VM with ovs and have Python3 environment ready: +paulyu@paul-microsoft:~$ which python3 +/usr/bin/python3 + +Run script: +python3 remove_ovs_bridges.py +python3 remove_ovs_leaked_rules.py diff --git a/hack/scripts/ovs_scripts/remove_ovs_bridges.py b/hack/scripts/ovs_scripts/remove_ovs_bridges.py new file mode 100644 index 0000000000..fef1097ad6 --- /dev/null +++ b/hack/scripts/ovs_scripts/remove_ovs_bridges.py @@ -0,0 +1,85 @@ +import subprocess +import os +import re +import requests + +# step 1: get all ovs bridges: +try: + ovsBridgeShow = subprocess.Popen(['ovs-vsctl', 'list-br'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) +except subprocess.CalledProcessError: + print("failed to execute ovs-vsctl show command") + os.Exit(1) + +stdout = ovsBridgeShow.communicate() +ovsBridgeList = stdout[0].decode("utf-8".strip()).split('\n') + +# step 2: remove all ovs bridges +for bridge in ovsBridgeList: + if bridge != "": + deleteCommand = "ovs-vsctl del-br %s"%bridge + try: + print("deleting ovs bridge by: ", deleteCommand) + os.system(deleteCommand) + except: + print("failed to delete all ovs bridges") + +# step 3: reset vSwitch configuration to clean state and delete manager +try: + os.system("ovs-vsctl del-manager") + os.system("ovs-vsctl emer-reset") +except: + print("failed to reset vSwitch configuration and delete manager") + +# step 4: check if ovs flows exist anymore +try: + ovsDPCtlShow = subprocess.Popen(['ovs-dpctl', 'show'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) +except subprocess.CalledProcessError: + print("failed to execute ovs-dpctl show command") + os.Exit(1) + +stdout = ovsDPCtlShow.communicate() +if stdout[0].decode("utf-8") != "": + print("ovs flows still exist, please check if all ovs bridges are removed from system") + os.Exit(1) + +# step 5: delete cni state file: +cniStatePath = "/var/run/azure-vnet.json" +if os.path.exists(cniStatePath): + try: + os.system("rm /var/run/azure-vnet.json") + except: + print("failed to delete cni state file") + os.Exit(1) + +# step 6: delete az* interfaces as supporting for apipa connectivity +try: + ovsBridgeShow = subprocess.Popen(['ls', '/sys/class/net'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) +except subprocess.CalledProcessError: + print("failed to execute get all interfaces command") + os.Exit(1) + +stdout = ovsBridgeShow.communicate() +for interface in stdout[0].decode("utf-8").split('\n'): + if interface.startswith("az"): + try: + ovsBridgeShow = subprocess.Popen(['ip', 'link', 'delete', interface], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + print("failed to delete interface: ", interface) + os.Exit(1) + +# step 7: check internet connectivity after ovs bridges are removed +url = "http://www.bing.com" +timeout = 5 +try: + request = requests.get(url, timeout=timeout) + print("Connected to the Internet") +except (requests.ConnectionError, requests.Timeout) as exception: + print("No internet connection.") \ No newline at end of file diff --git a/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py b/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py new file mode 100644 index 0000000000..6a6e57d801 --- /dev/null +++ b/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py @@ -0,0 +1,43 @@ +import subprocess +import re +import os + +# step 1: get ovs-dpctl show out to make sure which ports are being used +try: + ovsDPCtlShow = subprocess.Popen(['ovs-dpctl', 'show'], + stdout=subprocess.PIPE, + 
stderr=subprocess.STDOUT) +except subprocess.CalledProcessError: + print("failed to execute ovs-dpctl show command") + os.Exit(1) + +stdout = ovsDPCtlShow.communicate() + +usedPortList = re.findall("port (\d+)", str(stdout)) + +# Step 2: Check ovs flows dumps +try: + ovsDumpFlows = subprocess.Popen(['ovs-ofctl', 'dump-flows', 'azure0'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) +except subprocess.CalledProcessError: + print("failed to execute ovs-ofctl dump-flows command") + os.Exit(1) + +stdout = ovsDumpFlows.communicate() +allPortList = re.findall("in_port=(\d+)", str(stdout)) + +unUsedPortList = [] +for port in allPortList: + if port not in usedPortList: + unUsedPortList.append(port) + +# Step 3: delete leaked rules +# only use unused ports +for port in unUsedPortList: + deleteCommand = "ovs-ofctl del-flows azure0 ip,in_port=%s"%port + try: + os.system(deleteCommand) + except: + print("delete command %s does not work"%deleteCommand) + os.Exit(1) \ No newline at end of file From 5a40bff661482e268bc87aa2bec410b764c77b4f Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Mon, 24 Jul 2023 18:49:18 -0400 Subject: [PATCH 12/36] fix comments --- ...h_linux_test.go => datapath_linux_test.go} | 92 +++++++++++-------- .../datapath/datapath_windows_test.go | 27 +++--- test/integration/k8s_test.go | 2 +- .../datapath/linux-deployment-ipv6.yaml | 2 +- .../manifests/datapath/linux-deployment.yaml | 2 +- .../goldpinger/cluster-role-binding.yaml | 2 +- .../manifests/goldpinger/daemonset-ipv6.yaml | 2 +- .../manifests/goldpinger/daemonset.yaml | 2 +- .../manifests/goldpinger/deployment.yaml | 2 +- .../manifests/goldpinger/service-account.yaml | 2 +- .../manifests/goldpinger/service.yaml | 2 +- test/internal/k8sutils/utils.go | 6 ++ 12 files changed, 86 insertions(+), 57 deletions(-) rename test/integration/datapath/{dapapath_linux_test.go => datapath_linux_test.go} (80%) diff --git a/test/integration/datapath/dapapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go similarity index 80% rename from test/integration/datapath/dapapath_linux_test.go rename to test/integration/datapath/datapath_linux_test.go index a0a4ab9a38..b1f875b9f1 100644 --- a/test/integration/datapath/dapapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -5,7 +5,6 @@ package connection import ( "context" "flag" - "fmt" "net" "os" "testing" @@ -20,16 +19,15 @@ import ( appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( - LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml" - LinuxDeployIPv6 = "../manifests/datapath/linux-deployment-ipv6.yaml" podLabelKey = "app" podCount = 2 nodepoolKey = "agentpool" + LinuxDeployIPV4 = "../manifests/datapath/linux-deployment.yaml" + LinuxDeployIPv6 = "../manifests/datapath/linux-deployment-ipv6.yaml" maxRetryDelaySeconds = 10 defaultTimeoutSeconds = 120 defaultRetryDelaySeconds = 1 @@ -46,10 +44,11 @@ const ( var ( podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods") - podNamespace = flag.String("namespace", "default", "Namespace for test pods") - nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Node-Selector for pods") - testProfile = flag.String("testName", LinuxDeployIPV4, "Linux datapath test profile") - defaultRetrier = retry.Retrier{ + podNamespace = flag.String("namespace", "linux-datapath-test", "Namespace for test pods") + nodepoolSelector = 
flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Linux Node-Selector for pods") + // TODO: add flag to support dual nic scenario + isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario") + defaultRetrier = retry.Retrier{ Attempts: 10, Delay: defaultRetryDelaySeconds * time.Second, } @@ -58,8 +57,7 @@ var ( /* This test assumes that you have the current credentials loaded in your default kubeconfig for a k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. -*** The expected nodepool name is npwin, if the nodepool has a diferent name ensure that you change nodepoolSelector with: - -nodepoolSelector="yournodepoolname" +*** The expected nodepool name is a random Linux nodepool name To run the test use one of the following commands: go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration @@ -73,11 +71,6 @@ Timeout context is controled by the -timeout flag. */ -// return podLabelSelector and nodeLabelSelector -func createLabelSelectors() (string, string) { - return fmt.Sprintf("%s=%s", podLabelKey, *podPrefix), fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) -} - func setupLinuxEnvironment(t *testing.T) { ctx := context.Background() @@ -88,7 +81,10 @@ func setupLinuxEnvironment(t *testing.T) { } t.Log("Create Label Selectors") - podLabelSelector, nodeLabelSelector := createLabelSelectors() + podLabelSelector, nodeLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) + + t.Logf("nodeLabelSelector IS %s", nodeLabelSelector) + t.Logf("podLabelSelector uis %s", podLabelSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) @@ -96,24 +92,42 @@ func setupLinuxEnvironment(t *testing.T) { require.NoError(t, err, "could not get k8s node list: %v", err) } - createPodFlag := !(apierrors.IsAlreadyExists(err)) - t.Logf("%v", createPodFlag) + // Create namespace if it doesn't exist + namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace) + if err != nil { + require.NoError(t, err, "failed to check if namespace %s exists due to: %v", *podNamespace, err) + } - if createPodFlag { - var daemonset appsv1.DaemonSet - t.Log("Creating Linux pods through deployment") - deployment, err := k8sutils.MustParseDeployment(*testProfile) + if !namespaceExists { + // Test Namespace + t.Log("Create Namespace") + err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) if err != nil { - require.NoError(t, err) + require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err) } - if *testProfile == LinuxDeployIPV4 { - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) + var daemonset appsv1.DaemonSet + var deployment appsv1.Deployment + t.Log("Creating Linux pods through deployment") + + // run goldpinger ipv4 and ipv6 test cases saperately + if *isDualStack { + deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6) + if err != nil { + require.NoError(t, err) + } + + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) if err != nil { t.Fatal(err) } } else { - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) + deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) + if err != nil { + require.NoError(t, err) + } + + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) if err != nil { t.Fatal(err) } @@ -163,11 +177,15 @@ func 
setupLinuxEnvironment(t *testing.T) { if err != nil { require.NoError(t, err) } - t.Log("Successfully created customer linux pods") + + if *isDualStack { + t.Log("Successfully created customer dualstack Linux pods") + } else { + t.Log("Successfully created customer singlestack Linux pods") + } } else { - t.Log("Checking for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { + // delete namespace + if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { require.NoError(t, err) } } @@ -197,14 +215,17 @@ func TestDatapathLinux(t *testing.T) { clientset, _ := k8sutils.MustGetClientset() setupLinuxEnvironment(t) - podLabelSelector, _ := createLabelSelectors() + t.Logf("nodepoolSelector is %s", *nodepoolSelector) + t.Logf("podPrefix is %s", *podPrefix) + podLabelSelector, _ := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) + t.Logf("podLabelSelector is %s", podLabelSelector) t.Run("Linux ping tests", func(t *testing.T) { // Check goldpinger health t.Run("all pods have IPs assigned", func(t *testing.T) { podsClient := clientset.CoreV1().Pods(*podNamespace) - checkPodIPsFn := func() error { + _ = func() error { podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"}) if err != nil { return err @@ -228,15 +249,14 @@ func TestDatapathLinux(t *testing.T) { return nil } - err := defaultRetrier.Do(ctx, checkPodIPsFn) + err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { - t.Fatalf("not all pods were allocated IPs: %v", err) + require.NoError(t, err) } t.Log("all pods have been allocated IPs") }) - // TODO: avoid using yaml file path to control test case - if *testProfile == LinuxDeployIPv6 { + if *isDualStack { t.Run("Linux dualstack overlay tests", func(t *testing.T) { t.Run("test dualstack overlay", func(t *testing.T) { podsClient := clientset.CoreV1().Pods(*podNamespace) diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index f959daf645..b7d45200b4 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -5,7 +5,6 @@ package connection import ( "context" "flag" - "fmt" "net" "testing" @@ -14,7 +13,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/require" apiv1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" ) const ( @@ -26,8 +24,8 @@ const ( var ( podPrefix = flag.String("podName", "datapod", "Prefix for test pods") - podNamespace = flag.String("namespace", "datapath-win", "Namespace for test pods") - nodepoolSelector = flag.String("nodepoolSelector", "npwin", "Provides nodepool as a Node-Selector for pods") + podNamespace = flag.String("namespace", "windows-datapath-test", "Namespace for test pods") + nodepoolSelector = flag.String("nodepoolSelector", "npwin", "Provides nodepool as a windows Node-Selector for pods") ) /* @@ -60,8 +58,7 @@ func TestDatapathWin(t *testing.T) { restConfig := k8sutils.MustGetRestConfig(t) t.Log("Create Label Selectors") - podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix) - nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) + podLabelSelector, nodeLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, 
nodeLabelSelector) @@ -69,15 +66,20 @@ func TestDatapathWin(t *testing.T) { require.NoError(t, err, "could not get k8s node list: %v", err) } - // Test Namespace - t.Log("Create Namespace") - err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) + // Create namespace if it doesn't exist + namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace) if err != nil { - require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err) + require.NoError(t, err, "failed to check if namespace %s exists due to: %v", *podNamespace, err) } - createPodFlag := !(apierrors.IsAlreadyExists(err)) - if createPodFlag { + if !namespaceExists { + // Test Namespace + t.Log("Create Namespace") + err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) + if err != nil { + require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err) + } + t.Log("Creating Windows pods through deployment") deployment, err := k8sutils.MustParseDeployment(WindowsDeployYamlPath) if err != nil { @@ -114,6 +116,7 @@ func TestDatapathWin(t *testing.T) { require.NoError(t, err) } } + t.Log("Checking Windows test environment") for _, node := range nodes.Items { diff --git a/test/integration/k8s_test.go b/test/integration/k8s_test.go index e97995197e..a7783023b8 100644 --- a/test/integration/k8s_test.go +++ b/test/integration/k8s_test.go @@ -188,7 +188,7 @@ func TestPodScaling(t *testing.T) { defer cancel() pfOpts := PortForwardingOpts{ - Namespace: "default", + Namespace: "linux-datapath-test", LabelSelector: "type=goldpinger-pod", LocalPort: 9090, DestPort: 8080, diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml index 53f4ec5d54..e20de1df91 100644 --- a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml +++ b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: goldpinger-deploy - namespace: default + namespace: linux-datapath-test spec: replicas: 4 selector: diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml index 2ea74ecfcc..1e4d3433cc 100644 --- a/test/integration/manifests/datapath/linux-deployment.yaml +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: goldpinger-deploy - namespace: default + namespace: linux-datapath-test spec: replicas: 4 selector: diff --git a/test/integration/manifests/goldpinger/cluster-role-binding.yaml b/test/integration/manifests/goldpinger/cluster-role-binding.yaml index e18b186a12..e28155c1d3 100644 --- a/test/integration/manifests/goldpinger/cluster-role-binding.yaml +++ b/test/integration/manifests/goldpinger/cluster-role-binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: goldpinger-serviceaccount - namespace: default + namespace: linux-datapath-test diff --git a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml index f2eaa0de03..af5a285102 100644 --- a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml +++ b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: goldpinger-host - namespace: default + namespace: linux-datapath-test spec: selector: matchLabels: diff --git 
a/test/integration/manifests/goldpinger/daemonset.yaml b/test/integration/manifests/goldpinger/daemonset.yaml index ba77fa58ae..864d4a1c50 100644 --- a/test/integration/manifests/goldpinger/daemonset.yaml +++ b/test/integration/manifests/goldpinger/daemonset.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: goldpinger-host - namespace: default + namespace: linux-datapath-test spec: selector: matchLabels: diff --git a/test/integration/manifests/goldpinger/deployment.yaml b/test/integration/manifests/goldpinger/deployment.yaml index 67c9bfaa1c..0aac5300a0 100644 --- a/test/integration/manifests/goldpinger/deployment.yaml +++ b/test/integration/manifests/goldpinger/deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: goldpinger-pod - namespace: default + namespace: linux-datapath-test spec: replicas: 1 selector: diff --git a/test/integration/manifests/goldpinger/service-account.yaml b/test/integration/manifests/goldpinger/service-account.yaml index 7e418b1ebd..82098d1bce 100644 --- a/test/integration/manifests/goldpinger/service-account.yaml +++ b/test/integration/manifests/goldpinger/service-account.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: goldpinger-serviceaccount - namespace: default + namespace: linux-datapath-test diff --git a/test/integration/manifests/goldpinger/service.yaml b/test/integration/manifests/goldpinger/service.yaml index e99b089f38..4c45686386 100644 --- a/test/integration/manifests/goldpinger/service.yaml +++ b/test/integration/manifests/goldpinger/service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: goldpinger - namespace: default + namespace: linux-datapath-test labels: app: goldpinger spec: diff --git a/test/internal/k8sutils/utils.go b/test/internal/k8sutils/utils.go index 21873da72a..c2c78d0f96 100644 --- a/test/internal/k8sutils/utils.go +++ b/test/internal/k8sutils/utils.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "flag" + "fmt" "io" "log" "os" @@ -373,3 +374,8 @@ func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, names } return true, nil } + +// return podLabelSelector and nodeLabelSelector +func CreateLabelSelectors(podLabelKey, nodepoolKey string, podPrefix, nodepoolSelector *string) (string, string) { + return fmt.Sprintf("%s=%s", podLabelKey, *podPrefix), fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) +} From 60e90e6ba46aff8a485ac5324b7fea8dce304d25 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Mon, 24 Jul 2023 19:02:46 -0400 Subject: [PATCH 13/36] fix a minor issue --- test/integration/datapath/datapath_linux_test.go | 11 +++++------ test/integration/manifests/goldpinger/daemonset.yaml | 2 +- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index b1f875b9f1..7f79b86472 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -83,9 +83,6 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Create Label Selectors") podLabelSelector, nodeLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) - t.Logf("nodeLabelSelector IS %s", nodeLabelSelector) - t.Logf("podLabelSelector uis %s", podLabelSelector) - t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) if err != nil { @@ -215,10 +212,7 @@ func TestDatapathLinux(t *testing.T) { clientset, _ := 
k8sutils.MustGetClientset() setupLinuxEnvironment(t) - t.Logf("nodepoolSelector is %s", *nodepoolSelector) - t.Logf("podPrefix is %s", *podPrefix) podLabelSelector, _ := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) - t.Logf("podLabelSelector is %s", podLabelSelector) t.Run("Linux ping tests", func(t *testing.T) { // Check goldpinger health @@ -342,4 +336,9 @@ func TestDatapathLinux(t *testing.T) { t.Log("all pings successful!") }) }) + + // delete namespace after test is done + if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { + require.NoError(t, err) + } } diff --git a/test/integration/manifests/goldpinger/daemonset.yaml b/test/integration/manifests/goldpinger/daemonset.yaml index 864d4a1c50..fc041ed0c6 100644 --- a/test/integration/manifests/goldpinger/daemonset.yaml +++ b/test/integration/manifests/goldpinger/daemonset.yaml @@ -77,4 +77,4 @@ spec: path: /healthz port: 8080 initialDelaySeconds: 5 - periodSeconds: 5 + periodSeconds: 5 \ No newline at end of file From e5b17184043a83364f3fb8a6db5f709e85e52e98 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Mon, 24 Jul 2023 19:03:59 -0400 Subject: [PATCH 14/36] remove conflicts --- hack/scripts/ovs_scripts/README.md | 20 ----- .../scripts/ovs_scripts/remove_ovs_bridges.py | 85 ------------------- .../ovs_scripts/remove_ovs_leaked_rules.py | 43 ---------- 3 files changed, 148 deletions(-) delete mode 100644 hack/scripts/ovs_scripts/README.md delete mode 100644 hack/scripts/ovs_scripts/remove_ovs_bridges.py delete mode 100644 hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py diff --git a/hack/scripts/ovs_scripts/README.md b/hack/scripts/ovs_scripts/README.md deleted file mode 100644 index 89f6632e80..0000000000 --- a/hack/scripts/ovs_scripts/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# ovs_scripts - -`remove_ovs_bridges.py` script is for removing ovs switch(azure0) and the and openflow rules configured with it -ssh connection will not be lost when running script -It will get all existing ovs bridges and delete them and then delete CNI state file -and all interfaces starting with `az` that are used for supporting apipa connectivity. After that, -it will bring back VM to original state with eth0 as primary interface and -check if Linux VM internet connectivity is still working. - -`remove_ovs_leaked_rules.py` script is for removeing all leaked ovs rules -It will check ovs flow dumps and filter which ports are being used. Then delete these ovs rules that -are not associated with used ports. 
- -To run these script, clone scripts to Linux VM with ovs and have Python3 environment ready: -paulyu@paul-microsoft:~$ which python3 -/usr/bin/python3 - -Run script: -python3 remove_ovs_bridges.py -python3 remove_ovs_leaked_rules.py diff --git a/hack/scripts/ovs_scripts/remove_ovs_bridges.py b/hack/scripts/ovs_scripts/remove_ovs_bridges.py deleted file mode 100644 index fef1097ad6..0000000000 --- a/hack/scripts/ovs_scripts/remove_ovs_bridges.py +++ /dev/null @@ -1,85 +0,0 @@ -import subprocess -import os -import re -import requests - -# step 1: get all ovs bridges: -try: - ovsBridgeShow = subprocess.Popen(['ovs-vsctl', 'list-br'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) -except subprocess.CalledProcessError: - print("failed to execute ovs-vsctl show command") - os.Exit(1) - -stdout = ovsBridgeShow.communicate() -ovsBridgeList = stdout[0].decode("utf-8".strip()).split('\n') - -# step 2: remove all ovs bridges -for bridge in ovsBridgeList: - if bridge != "": - deleteCommand = "ovs-vsctl del-br %s"%bridge - try: - print("deleting ovs bridge by: ", deleteCommand) - os.system(deleteCommand) - except: - print("failed to delete all ovs bridges") - -# step 3: reset vSwitch configuration to clean state and delete manager -try: - os.system("ovs-vsctl del-manager") - os.system("ovs-vsctl emer-reset") -except: - print("failed to reset vSwitch configuration and delete manager") - -# step 4: check if ovs flows exist anymore -try: - ovsDPCtlShow = subprocess.Popen(['ovs-dpctl', 'show'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) -except subprocess.CalledProcessError: - print("failed to execute ovs-dpctl show command") - os.Exit(1) - -stdout = ovsDPCtlShow.communicate() -if stdout[0].decode("utf-8") != "": - print("ovs flows still exist, please check if all ovs bridges are removed from system") - os.Exit(1) - -# step 5: delete cni state file: -cniStatePath = "/var/run/azure-vnet.json" -if os.path.exists(cniStatePath): - try: - os.system("rm /var/run/azure-vnet.json") - except: - print("failed to delete cni state file") - os.Exit(1) - -# step 6: delete az* interfaces as supporting for apipa connectivity -try: - ovsBridgeShow = subprocess.Popen(['ls', '/sys/class/net'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) -except subprocess.CalledProcessError: - print("failed to execute get all interfaces command") - os.Exit(1) - -stdout = ovsBridgeShow.communicate() -for interface in stdout[0].decode("utf-8").split('\n'): - if interface.startswith("az"): - try: - ovsBridgeShow = subprocess.Popen(['ip', 'link', 'delete', interface], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) - except subprocess.CalledProcessError: - print("failed to delete interface: ", interface) - os.Exit(1) - -# step 7: check internet connectivity after ovs bridges are removed -url = "http://www.bing.com" -timeout = 5 -try: - request = requests.get(url, timeout=timeout) - print("Connected to the Internet") -except (requests.ConnectionError, requests.Timeout) as exception: - print("No internet connection.") \ No newline at end of file diff --git a/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py b/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py deleted file mode 100644 index 6a6e57d801..0000000000 --- a/hack/scripts/ovs_scripts/remove_ovs_leaked_rules.py +++ /dev/null @@ -1,43 +0,0 @@ -import subprocess -import re -import os - -# step 1: get ovs-dpctl show out to make sure which ports are being used -try: - ovsDPCtlShow = subprocess.Popen(['ovs-dpctl', 'show'], - stdout=subprocess.PIPE, - 
stderr=subprocess.STDOUT) -except subprocess.CalledProcessError: - print("failed to execute ovs-dpctl show command") - os.Exit(1) - -stdout = ovsDPCtlShow.communicate() - -usedPortList = re.findall("port (\d+)", str(stdout)) - -# Step 2: Check ovs flows dumps -try: - ovsDumpFlows = subprocess.Popen(['ovs-ofctl', 'dump-flows', 'azure0'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) -except subprocess.CalledProcessError: - print("failed to execute ovs-ofctl dump-flows command") - os.Exit(1) - -stdout = ovsDumpFlows.communicate() -allPortList = re.findall("in_port=(\d+)", str(stdout)) - -unUsedPortList = [] -for port in allPortList: - if port not in usedPortList: - unUsedPortList.append(port) - -# Step 3: delete leaked rules -# only use unused ports -for port in unUsedPortList: - deleteCommand = "ovs-ofctl del-flows azure0 ip,in_port=%s"%port - try: - os.system(deleteCommand) - except: - print("delete command %s does not work"%deleteCommand) - os.Exit(1) \ No newline at end of file From b154ab6c1af88b0957bf06fb523a7fa5905c7e79 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Mon, 24 Jul 2023 19:06:21 -0400 Subject: [PATCH 15/36] fix comment --- test/integration/datapath/datapath_linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 7f79b86472..154e9eddda 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -57,7 +57,7 @@ var ( /* This test assumes that you have the current credentials loaded in your default kubeconfig for a k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. -*** The expected nodepool name is a random Linux nodepool name +*** The expected nodepool name is nodepool1, if the nodepool has a different name ensure that you change nodepoolSelector with: To run the test use one of the following commands: go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration From a13b9de6224b0a25f7696b26a7ed8f6eecf5322e Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 13:10:37 -0400 Subject: [PATCH 16/36] fix comments --- test/integration/datapath/datapath_linux_test.go | 13 ++++++++----- test/integration/datapath/datapath_windows_test.go | 3 ++- test/internal/k8sutils/utils.go | 6 +++--- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 154e9eddda..be409c56b7 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -58,6 +58,7 @@ var ( This test assumes that you have the current credentials loaded in your default kubeconfig for a k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes. 
*** The expected nodepool name is nodepool1, if the nodepool has a different name ensure that you change nodepoolSelector with: + -nodepoolSelector="yournodepoolname" To run the test use one of the following commands: go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration @@ -81,7 +82,8 @@ func setupLinuxEnvironment(t *testing.T) { } t.Log("Create Label Selectors") - podLabelSelector, nodeLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) + podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) + nodeLabelSelector := k8sutils.CreateLabelSelectors(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) @@ -130,7 +132,7 @@ func setupLinuxEnvironment(t *testing.T) { } } - rbacCleanUpFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) if err != nil { t.Log(os.Getwd()) t.Fatal(err) @@ -158,7 +160,7 @@ func setupLinuxEnvironment(t *testing.T) { t.Cleanup(func() { t.Log("cleaning up resources") - rbacCleanUpFn() + rbacSetupFn() if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { t.Log(err) @@ -181,10 +183,11 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Successfully created customer singlestack Linux pods") } } else { - // delete namespace + // delete namespace and stop test cases if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { require.NoError(t, err) } + return } t.Log("Checking Linux test environment") @@ -212,7 +215,7 @@ func TestDatapathLinux(t *testing.T) { clientset, _ := k8sutils.MustGetClientset() setupLinuxEnvironment(t) - podLabelSelector, _ := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) + podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) t.Run("Linux ping tests", func(t *testing.T) { // Check goldpinger health diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index b7d45200b4..45906a5907 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -58,7 +58,8 @@ func TestDatapathWin(t *testing.T) { restConfig := k8sutils.MustGetRestConfig(t) t.Log("Create Label Selectors") - podLabelSelector, nodeLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, nodepoolKey, podPrefix, nodepoolSelector) + podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) + nodeLabelSelector := k8sutils.CreateLabelSelectors(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) diff --git a/test/internal/k8sutils/utils.go b/test/internal/k8sutils/utils.go index c2c78d0f96..0c4212dded 100644 --- a/test/internal/k8sutils/utils.go +++ b/test/internal/k8sutils/utils.go @@ -375,7 +375,7 @@ func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, names return true, nil } -// return podLabelSelector and nodeLabelSelector -func CreateLabelSelectors(podLabelKey, nodepoolKey string, podPrefix, nodepoolSelector *string) (string, string) { - return fmt.Sprintf("%s=%s", podLabelKey, 
*podPrefix), fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector) +// return a label selector +func CreateLabelSelectors(key string, selector *string) string { + return fmt.Sprintf("%s=%s", key, *selector) } From 86227736a5070520bf1a0d2108bcd0d109a89ddc Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 13:28:12 -0400 Subject: [PATCH 17/36] rerun test --- test/integration/datapath/datapath_linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index be409c56b7..3fd37b945b 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -187,7 +187,7 @@ func setupLinuxEnvironment(t *testing.T) { if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { require.NoError(t, err) } - return + t.fatal("goldpinger namespace exists and was deleted. Re-run test") } t.Log("Checking Linux test environment") From 1e1ae11c49688ccf0183e77a04fc3d0cceff8b99 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 13:29:45 -0400 Subject: [PATCH 18/36] rerun test --- test/integration/datapath/datapath_linux_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 3fd37b945b..a07210a48f 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -187,7 +187,7 @@ func setupLinuxEnvironment(t *testing.T) { if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { require.NoError(t, err) } - t.fatal("goldpinger namespace exists and was deleted. Re-run test") + t.Fatal("goldpinger namespace exists and was deleted. 
Re-run test") } t.Log("Checking Linux test environment") From 7478d7fb9f0c314554c0db2dba49e372908f392f Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 14:32:35 -0400 Subject: [PATCH 19/36] fix comments --- .../datapath/datapath_linux_test.go | 32 ++----------------- .../datapath/datapath_windows_test.go | 4 +-- test/internal/k8sutils/utils.go | 2 +- 3 files changed, 6 insertions(+), 32 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index a07210a48f..73afd0408c 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -82,8 +82,8 @@ func setupLinuxEnvironment(t *testing.T) { } t.Log("Create Label Selectors") - podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) - nodeLabelSelector := k8sutils.CreateLabelSelectors(nodepoolKey, nodepoolSelector) + podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) @@ -215,37 +215,11 @@ func TestDatapathLinux(t *testing.T) { clientset, _ := k8sutils.MustGetClientset() setupLinuxEnvironment(t) - podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) + podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) t.Run("Linux ping tests", func(t *testing.T) { // Check goldpinger health t.Run("all pods have IPs assigned", func(t *testing.T) { - podsClient := clientset.CoreV1().Pods(*podNamespace) - - _ = func() error { - podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"}) - if err != nil { - return err - } - - if len(podList.Items) == 0 { - return errors.New("no pods scheduled") - } - - for _, pod := range podList.Items { - if pod.Status.Phase == apiv1.PodPending { - return errors.New("some pods still pending") - } - } - - for _, pod := range podList.Items { - if pod.Status.PodIP == "" { - return errors.New("a pod has not been allocated an IP") - } - } - - return nil - } err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { require.NoError(t, err) diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index 45906a5907..ae27ace2ce 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -58,8 +58,8 @@ func TestDatapathWin(t *testing.T) { restConfig := k8sutils.MustGetRestConfig(t) t.Log("Create Label Selectors") - podLabelSelector := k8sutils.CreateLabelSelectors(podLabelKey, podPrefix) - nodeLabelSelector := k8sutils.CreateLabelSelectors(nodepoolKey, nodepoolSelector) + podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) diff --git a/test/internal/k8sutils/utils.go b/test/internal/k8sutils/utils.go index 0c4212dded..7697e43c31 100644 --- a/test/internal/k8sutils/utils.go +++ b/test/internal/k8sutils/utils.go @@ -376,6 +376,6 @@ func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, names } // return a label selector -func CreateLabelSelectors(key string, selector *string) string { +func CreateLabelSelector(key string, 
selector *string) string { return fmt.Sprintf("%s=%s", key, *selector) } From a8642c8c482c99e98a9971bbc13c82da03840667 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 16:05:38 -0400 Subject: [PATCH 20/36] change namespace back to default --- test/integration/datapath/datapath_linux_test.go | 2 +- test/integration/k8s_test.go | 2 +- test/integration/manifests/goldpinger/cluster-role-binding.yaml | 2 +- test/integration/manifests/goldpinger/daemonset-ipv6.yaml | 2 +- test/integration/manifests/goldpinger/daemonset.yaml | 2 +- test/integration/manifests/goldpinger/deployment.yaml | 2 +- test/integration/manifests/goldpinger/service-account.yaml | 2 +- test/integration/manifests/goldpinger/service.yaml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 73afd0408c..fba524e17e 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -187,7 +187,7 @@ func setupLinuxEnvironment(t *testing.T) { if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { require.NoError(t, err) } - t.Fatal("goldpinger namespace exists and was deleted. Re-run test") + t.Fatal("goldpinger namespace existed and it was deleted. Please re-run test cases") } t.Log("Checking Linux test environment") diff --git a/test/integration/k8s_test.go b/test/integration/k8s_test.go index a7783023b8..e97995197e 100644 --- a/test/integration/k8s_test.go +++ b/test/integration/k8s_test.go @@ -188,7 +188,7 @@ func TestPodScaling(t *testing.T) { defer cancel() pfOpts := PortForwardingOpts{ - Namespace: "linux-datapath-test", + Namespace: "default", LabelSelector: "type=goldpinger-pod", LocalPort: 9090, DestPort: 8080, diff --git a/test/integration/manifests/goldpinger/cluster-role-binding.yaml b/test/integration/manifests/goldpinger/cluster-role-binding.yaml index e28155c1d3..e18b186a12 100644 --- a/test/integration/manifests/goldpinger/cluster-role-binding.yaml +++ b/test/integration/manifests/goldpinger/cluster-role-binding.yaml @@ -9,4 +9,4 @@ roleRef: subjects: - kind: ServiceAccount name: goldpinger-serviceaccount - namespace: linux-datapath-test + namespace: default diff --git a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml index af5a285102..f2eaa0de03 100644 --- a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml +++ b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: goldpinger-host - namespace: linux-datapath-test + namespace: default spec: selector: matchLabels: diff --git a/test/integration/manifests/goldpinger/daemonset.yaml b/test/integration/manifests/goldpinger/daemonset.yaml index fc041ed0c6..41a86cc3f0 100644 --- a/test/integration/manifests/goldpinger/daemonset.yaml +++ b/test/integration/manifests/goldpinger/daemonset.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: goldpinger-host - namespace: linux-datapath-test + namespace: default spec: selector: matchLabels: diff --git a/test/integration/manifests/goldpinger/deployment.yaml b/test/integration/manifests/goldpinger/deployment.yaml index 0aac5300a0..67c9bfaa1c 100644 --- a/test/integration/manifests/goldpinger/deployment.yaml +++ b/test/integration/manifests/goldpinger/deployment.yaml @@ -2,7 +2,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: goldpinger-pod 
- namespace: linux-datapath-test + namespace: default spec: replicas: 1 selector: diff --git a/test/integration/manifests/goldpinger/service-account.yaml b/test/integration/manifests/goldpinger/service-account.yaml index 82098d1bce..7e418b1ebd 100644 --- a/test/integration/manifests/goldpinger/service-account.yaml +++ b/test/integration/manifests/goldpinger/service-account.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: goldpinger-serviceaccount - namespace: linux-datapath-test + namespace: default diff --git a/test/integration/manifests/goldpinger/service.yaml b/test/integration/manifests/goldpinger/service.yaml index 4c45686386..e99b089f38 100644 --- a/test/integration/manifests/goldpinger/service.yaml +++ b/test/integration/manifests/goldpinger/service.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Service metadata: name: goldpinger - namespace: linux-datapath-test + namespace: default labels: app: goldpinger spec: From dc40c968a9c5fa5379ff2e02c1c20487a2870bfa Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 19:17:27 -0400 Subject: [PATCH 21/36] add namespace fixes --- .../datapath/datapath_linux_test.go | 152 ++++++++---------- test/internal/k8sutils/utils_create.go | 2 +- test/internal/k8sutils/utils_delete.go | 2 +- 3 files changed, 71 insertions(+), 85 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index fba524e17e..be923aa514 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -18,7 +18,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" - apiv1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -44,7 +44,7 @@ const ( var ( podPrefix = flag.String("podName", "goldpinger", "Prefix for test pods") - podNamespace = flag.String("namespace", "linux-datapath-test", "Namespace for test pods") + podNamespace = flag.String("namespace", "default", "Namespace for test pods") nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Linux Node-Selector for pods") // TODO: add flag to support dual nic scenario isDualStack = flag.Bool("isDualStack", false, "whether system supports dualstack scenario") @@ -91,103 +91,85 @@ func setupLinuxEnvironment(t *testing.T) { require.NoError(t, err, "could not get k8s node list: %v", err) } - // Create namespace if it doesn't exist - namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace) - if err != nil { - require.NoError(t, err, "failed to check if namespace %s exists due to: %v", *podNamespace, err) - } - - if !namespaceExists { - // Test Namespace - t.Log("Create Namespace") - err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) - if err != nil { - require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err) - } - - var daemonset appsv1.DaemonSet - var deployment appsv1.Deployment - t.Log("Creating Linux pods through deployment") - - // run goldpinger ipv4 and ipv6 test cases saperately - if *isDualStack { - deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6) - if err != nil { - require.NoError(t, err) - } + t.Log("Creating Linux pods through deployment") - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) - if err != nil { - t.Fatal(err) - } - } else { - deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) - if err != nil { - require.NoError(t, 
err) - } + // run goldpinger ipv4 and ipv6 test cases saperately + var daemonset appsv1.DaemonSet + var deployment appsv1.Deployment - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) - if err != nil { - t.Fatal(err) - } + if *isDualStack { + deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6) + if err != nil { + require.NoError(t, err) } - rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) if err != nil { - t.Log(os.Getwd()) t.Fatal(err) } - - // Fields for overwritting existing deployment yaml. - // Defaults from flags will not change anything - deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix - deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix - deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector - deployment.Name = *podPrefix - deployment.Namespace = *podNamespace - - deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + } else { + deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) if err != nil { require.NoError(t, err) } - daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) - err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) if err != nil { t.Fatal(err) } + } + + // setup common RBAC, ClusteerRole, ClusterRoleBinding, ServiceAccount + rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + if err != nil { + t.Log(os.Getwd()) + t.Fatal(err) + } - t.Cleanup(func() { - t.Log("cleaning up resources") - rbacSetupFn() + // Fields for overwritting existing deployment yaml. 
+ // Defaults from flags will not change anything + deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix + deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix + deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector + deployment.Name = *podPrefix + deployment.Namespace = *podNamespace + daemonset.Namespace = *podNamespace + + deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) + err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + if err != nil { + require.NoError(t, err) + } - if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { - t.Log(err) - } + daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) + err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + if err != nil { + t.Fatal(err) + } - if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { - t.Log(err) - } - }) + t.Cleanup(func() { + t.Log("cleaning up resources") + rbacSetupFn() - t.Log("Waiting for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) - if err != nil { - require.NoError(t, err) + if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil { + t.Log(err) } - if *isDualStack { - t.Log("Successfully created customer dualstack Linux pods") - } else { - t.Log("Successfully created customer singlestack Linux pods") + if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil { + t.Log(err) } + }) + + t.Log("Waiting for pods to be running state") + err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + if err != nil { + require.NoError(t, err) + } + + if *isDualStack { + t.Log("Successfully created customer dualstack Linux pods") } else { - // delete namespace and stop test cases - if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { - require.NoError(t, err) - } - t.Fatal("goldpinger namespace existed and it was deleted. 
Please re-run test cases") + t.Log("Successfully created customer singlestack Linux pods") } t.Log("Checking Linux test environment") @@ -200,8 +182,16 @@ func setupLinuxEnvironment(t *testing.T) { t.Logf("%s", node.Name) require.NoError(t, errors.New("Less than 2 pods on node")) } + } + errFlag := apierrors.IsAlreadyExists(err) + if errFlag { + if err := k8sutils.MustDeleteDaemonset(ctx, daemonsetClient, daemonset); err != nil { + require.NoError(t, err) + } + t.Fatal("delete all goldpinger hosts and pods under default namespace if there is any error") } + t.Log("Linux test environment ready") } @@ -286,8 +276,9 @@ func TestDatapathLinux(t *testing.T) { } return nil } + if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil { - t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err) + t.Fatalf("could not start port forward within %d: %v", defaultTimeoutSeconds, err) } defer pf.Stop() @@ -313,9 +304,4 @@ func TestDatapathLinux(t *testing.T) { t.Log("all pings successful!") }) }) - - // delete namespace after test is done - if err := k8sutils.MustDeleteNamespace(ctx, clientset, *podNamespace); err != nil { - require.NoError(t, err) - } } diff --git a/test/internal/k8sutils/utils_create.go b/test/internal/k8sutils/utils_create.go index 8d21af1035..069da82ca0 100644 --- a/test/internal/k8sutils/utils_create.go +++ b/test/internal/k8sutils/utils_create.go @@ -30,7 +30,7 @@ func MustCreateOrUpdatePod(ctx context.Context, podI typedcorev1.PodInterface, p } func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { - if err := mustDeleteDaemonset(ctx, daemonsets, ds); err != nil { + if err := MustDeleteDaemonset(ctx, daemonsets, ds); err != nil { return err } log.Printf("Creating Daemonset %v", ds.Name) diff --git a/test/internal/k8sutils/utils_delete.go b/test/internal/k8sutils/utils_delete.go index 1032b406eb..846f15b2cf 100644 --- a/test/internal/k8sutils/utils_delete.go +++ b/test/internal/k8sutils/utils_delete.go @@ -22,7 +22,7 @@ func MustDeletePod(ctx context.Context, podI typedcorev1.PodInterface, pod corev return nil } -func mustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { +func MustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { if err := daemonsets.Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { return err From bac5b33d81f76e05cbe9877f598291212d3b89c7 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 19:37:37 -0400 Subject: [PATCH 22/36] add pipeline --- .../dualstackoverlay-e2e-job-template.yaml | 32 ++++++ .../dualstackoverlay-e2e-step-template.yaml | 107 ++++++++++++++++++ hack/aks/Makefile | 27 +++++ hack/aks/README.md | 22 ++-- test/integration/setup_test.go | 28 +++-- 5 files changed, 199 insertions(+), 17 deletions(-) create mode 100644 .pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml create mode 100644 .pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml new file mode 100644 index 0000000000..fadb3196ab --- /dev/null +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml @@ -0,0 +1,32 @@ +parameters: + name: "" + displayName: 
"" + pipelineBuildImage: "$(BUILD_IMAGE)" + testDropgz: "" + clusterName: "" + +stages: + - stage: ${{ parameters.name }} + displayName: E2E - ${{ parameters.displayName }} + dependsOn: + - setup + - publish + jobs: + - job: ${{ parameters.name }} + displayName: DualStack Overlay Test Suite - (${{ parameters.name }}) + timeoutInMinutes: 120 + pool: + name: $(BUILD_POOL_NAME_DEFAULT) + demands: + - Role -equals $(CUSTOM_E2E_ROLE) + variables: + GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path + GOBIN: "$(GOPATH)/bin" # Go binaries path + modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" + steps: + - template: dualstackoverlay-e2e-step-template.yaml + parameters: + name: ${{ parameters.name }} + testDropgz: ${{ parameters.testDropgz }} + clusterName: ${{ parameters.clusterName }} + \ No newline at end of file diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml new file mode 100644 index 0000000000..866e09495b --- /dev/null +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml @@ -0,0 +1,107 @@ +parameters: + name: "" + testDropgz: "" + clusterName: "" + +steps: + - bash: | + echo $UID + sudo rm -rf $(System.DefaultWorkingDirectory)/* + displayName: "Set up OS environment" + - checkout: self + + - bash: | + go version + go env + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + name: "GoEnv" + displayName: "Set up the Go environment" + - task: AzureCLI@2 + inputs: + azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) + scriptLocation: "inlineScript" + scriptType: "bash" + addSpnToEnvironment: true + inlineScript: | + echo "Check az version" + az version + echo "Install az cli extension preview" + az extension add --name aks-preview + az extension update --name aks-preview + mkdir -p ~/.kube/ + echo "Create AKS DualStack Overlay cluster" + make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) + make -C ./hack/aks dualstack-overlay-byocni-up AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) + echo "Dualstack Overlay Cluster is successfully created" + displayName: Create DualStackOverlay cluster + condition: succeeded() + + - script: | + ls -lah + pwd + echo "installing kubectl" + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + kubectl cluster-info + kubectl get node + kubectl get po -owide -A + sudo -E env "PATH=$PATH" make install-azure-images CNS_VERSION=$(make cns-version) CNI_DROPGZ_VERSION=$(make cni-dropgz-version) INSTALL_CNS=true INSTALL_AZURE_VNET=true INSTALL_DUALSTACK_OVERLAY=true TEST_DROPGZ=${{ parameters.testDropgz }} + kubectl get po -owide -A + retryCountOnTaskFailure: 3 + name: "installKubectl" + displayName: "Install kubectl on AKS dualstack overlay cluster" + + - script: | + cd test/integration/load + cd ../datapath + sudo go test -count=1 datapath_linux_test.go -timeout 1m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true + name: "DualStack_Overlay_Linux_Datapath_tests" + displayName: "DualStack Overlay Linux Datapath Tests" + + # - task: AzureCLI@2 
+ # inputs: + # azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) + # scriptLocation: "inlineScript" + # scriptType: "bash" + # addSpnToEnvironment: true + # inlineScript: | + # make -C ./hack/aks dualstack-windows-byocni-up AZCLI=az VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) + # echo "Windows node are successfully added to Dualstack Overlay Cluster" + # kubectl cluster-info + # kubectl get node + # kubectl get po -owide -A + # name: "Add_Windows_Node" + # displayName: "Add windows node on DualStackOverlay cluster" + + # - script: | + # pwd + # cd test/integration/load + # go test -timeout 30m -tags load -run ^TestLoad$ -tags=load -os=windows + # echo "DualStack Overlay Windows control plane CNS validation test" + # go test -timeout 30m -tags load -run ^TestDualStackProperties$ -tags=load -os=windows -cni cniv2 + # echo "DualStack Overlay Windows control plane Node properties test" + # go test -timeout 30m -tags load -cni cniv2 -run ^TestValidateState$ -tags=load -os=windows + # echo "DualStack Overlay Windows datapath test" + # cd ../datapath + # go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection + # name: "DualStack_Overlay_Windows_tests" + # displayName: "DualStack Overlay Windows Tests" + + # - task: AzureCLI@2 + # inputs: + # azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) + # scriptLocation: "inlineScript" + # scriptType: "bash" + # addSpnToEnvironment: true + # inlineScript: | + # echo "Deleting cluster" + # make -C ./hack/aks azcfg AZCLI=az + # make -C ./hack/aks down SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) AZCLI=az CLUSTER=${{ parameters.clusterName }}-$(make revision) + # echo "Cluster and resources down" + # name: "CleanupDualStackOverlaycluster" + # displayName: "Cleanup DualStack Overlay Cluster" + # condition: always() \ No newline at end of file diff --git a/hack/aks/Makefile b/hack/aks/Makefile index 14e2f1e0f7..abe845de2e 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -214,6 +214,33 @@ windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster @$(MAKE) set-kubeconf +dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only + $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ + --kubernetes-version 1.26.3 \ + --node-count $(NODE_COUNT) \ + --node-vm-size $(VM_SIZE) \ + --network-plugin azure \ + --ip-families ipv4,ipv6 \ + --network-plugin-mode overlay \ + --subscription $(SUB) \ + --no-ssh-key \ + --yes + @$(MAKE) set-kubeconf + +dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Overlay BYO CNI cluster + $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ + --kubernetes-version 1.26.3 \ + --node-count $(NODE_COUNT) \ + --node-vm-size $(VM_SIZE) \ + --network-plugin none \ + --network-plugin-mode overlay \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ + --ip-families ipv4,ipv6 \ + --subscription $(SUB) \ + --no-ssh-key \ + --yes + @$(MAKE) set-kubeconf + linux-cniv1-up: rg-up overlay-net-up $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ --node-count $(NODE_COUNT) \ diff --git a/hack/aks/README.md b/hack/aks/README.md index 3a4e80e4f0..1a2eeceae4 100644 --- a/hack/aks/README.md +++ b/hack/aks/README.md @@ -21,14 +21,16 @@ SWIFT Infra net-up Create required swift vnet/subnets AKS Clusters - byocni-up Alias to swift-byocni-up 
- cilium-up Alias to swift-cilium-up - up Alias to swift-up - overlay-up Brings up an Overlay AzCNI cluster - swift-byocni-up Bring up a SWIFT BYO CNI cluster - swift-cilium-up Bring up a SWIFT Cilium cluster - swift-up Bring up a SWIFT AzCNI cluster - windows-cniv1-up Bring up a Windows AzCNIv1 cluster - down Delete the cluster - vmss-restart Restart the nodes of the cluster + byocni-up Alias to swift-byocni-up + cilium-up Alias to swift-cilium-up + up Alias to swift-up + overlay-up Brings up an Overlay AzCNI cluster + swift-byocni-up Bring up a SWIFT BYO CNI cluster + swift-cilium-up Bring up a SWIFT Cilium cluster + swift-up Bring up a SWIFT AzCNI cluster + dualstack-overlay-up Brings up an dualstack overlay cluster + dualstack-overlay-byocni-up Brings up an dualstack overlay cluster without CNS and CNI installed + windows-cniv1-up Bring up a Windows AzCNIv1 cluster + down Delete the cluster + vmss-restart Restart the nodes of the cluster ``` diff --git a/test/integration/setup_test.go b/test/integration/setup_test.go index 5ece6e6498..ff67f7c784 100644 --- a/test/integration/setup_test.go +++ b/test/integration/setup_test.go @@ -17,13 +17,14 @@ import ( const ( exitFail = 1 - envTestDropgz = "TEST_DROPGZ" - envCNIDropgzVersion = "CNI_DROPGZ_VERSION" - envCNSVersion = "CNS_VERSION" - envInstallCNS = "INSTALL_CNS" - envInstallAzilium = "INSTALL_AZILIUM" - envInstallAzureVnet = "INSTALL_AZURE_VNET" - envInstallOverlay = "INSTALL_OVERLAY" + envTestDropgz = "TEST_DROPGZ" + envCNIDropgzVersion = "CNI_DROPGZ_VERSION" + envCNSVersion = "CNS_VERSION" + envInstallCNS = "INSTALL_CNS" + envInstallAzilium = "INSTALL_AZILIUM" + envInstallAzureVnet = "INSTALL_AZURE_VNET" + envInstallOverlay = "INSTALL_OVERLAY" + envInstallDualStackOverlay = "INSTALL_DUALSTACK_OVERLAY" // relative cns manifest paths cnsManifestFolder = "manifests/cns" @@ -160,6 +161,19 @@ func installCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, l log.Printf("Env %v not set to true, skipping", envInstallOverlay) } + if installBool4 := os.Getenv(envInstallDualStackOverlay); installBool4 != "" { + if dualStackOverlayScenario, err := strconv.ParseBool(installBool4); err == nil && dualStackOverlayScenario == true { + log.Printf("Env %v set to true, deploy azure-vnet", envInstallDualStackOverlay) + cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", "/opt/cni/bin/azure-vnet-ipam", "azure-swift-overlay-dualstack.conflist", "-o", "/etc/cni/net.d/10-azure.conflist"} + } + // setup the CNS swiftconfigmap + if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsSwiftConfigMapPath); err != nil { + return nil, err + } + } else { + log.Printf("Env %v not set to true, skipping", envInstallDualStackOverlay) + } + cnsDaemonsetClient := clientset.AppsV1().DaemonSets(cns.Namespace) log.Printf("Installing CNS with image %s", cns.Spec.Template.Spec.Containers[0].Image) From 45ea65d7ed295b3852cec9289def91d95be8d39a Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 19:40:29 -0400 Subject: [PATCH 23/36] add pipeline --- .pipelines/pipeline.yaml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 137ea96f1f..6b5b6c38e3 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -332,6 +332,14 @@ stages: testDropgz: "" clusterName: "overlaye2e" + - template: 
singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml + parameters: + name: "dualstackoverlay_e2e" + displayName: AKS DualStack Overlay + pipelineBuildImage: "$(BUILD_IMAGE)" + testDropgz: "" + clusterName: "dualstackoverlaye2e" + - template: singletenancy/aks-swift/e2e-job-template.yaml parameters: name: "aks_swift_e2e" From 2c70b7fc872b1f66da6e0a2bf312b6491f705da8 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 20:40:13 -0400 Subject: [PATCH 24/36] add logs --- .../dualstack-overlay/dualstackoverlay-e2e-step-template.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml index 866e09495b..379aeb6957 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml @@ -58,7 +58,7 @@ steps: - script: | cd test/integration/load cd ../datapath - sudo go test -count=1 datapath_linux_test.go -timeout 1m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true + sudo go test -count=1 datapath_linux_test.go -timeout 1m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true -v name: "DualStack_Overlay_Linux_Datapath_tests" displayName: "DualStack Overlay Linux Datapath Tests" From 69b112f35c3e73b7b2f236686fbaa966ca791c7c Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 22:43:32 -0400 Subject: [PATCH 25/36] fix dualstack pipeline setup --- .../dualstack-overlay/dualstackoverlay-e2e-step-template.yaml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml index 379aeb6957..0ebc45882f 100644 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml +++ b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml @@ -35,7 +35,7 @@ steps: mkdir -p ~/.kube/ echo "Create AKS DualStack Overlay cluster" make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) - make -C ./hack/aks dualstack-overlay-byocni-up AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) + make -C ./hack/aks dualstack-overlay-up AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) echo "Dualstack Overlay Cluster is successfully created" displayName: Create DualStackOverlay cluster condition: succeeded() @@ -49,8 +49,6 @@ steps: kubectl cluster-info kubectl get node kubectl get po -owide -A - sudo -E env "PATH=$PATH" make install-azure-images CNS_VERSION=$(make cns-version) CNI_DROPGZ_VERSION=$(make cni-dropgz-version) INSTALL_CNS=true INSTALL_AZURE_VNET=true INSTALL_DUALSTACK_OVERLAY=true TEST_DROPGZ=${{ parameters.testDropgz }} - kubectl get po -owide -A retryCountOnTaskFailure: 3 name: "installKubectl" displayName: "Install kubectl on AKS dualstack overlay cluster" From 01e025f5ed6342bbe2c7fcd53038bd529bb0cf02 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Tue, 25 Jul 2023 23:31:57 -0400 Subject: [PATCH 26/36] add 
AzureOverlayDualStackPreview --- hack/aks/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/hack/aks/Makefile b/hack/aks/Makefile index abe845de2e..f642b25669 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -222,6 +222,7 @@ dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay clu --network-plugin azure \ --ip-families ipv4,ipv6 \ --network-plugin-mode overlay \ + --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ --subscription $(SUB) \ --no-ssh-key \ --yes From 4d4e0fc35c65027c6aad8f3bb576bc3f7fa101bc Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 07:20:22 -0400 Subject: [PATCH 27/36] delete pipeline templates --- .pipelines/pipeline.yaml | 8 -- .../dualstackoverlay-e2e-job-template.yaml | 32 ------ .../dualstackoverlay-e2e-step-template.yaml | 105 ------------------ 3 files changed, 145 deletions(-) delete mode 100644 .pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml delete mode 100644 .pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml diff --git a/.pipelines/pipeline.yaml b/.pipelines/pipeline.yaml index 6b5b6c38e3..137ea96f1f 100644 --- a/.pipelines/pipeline.yaml +++ b/.pipelines/pipeline.yaml @@ -332,14 +332,6 @@ stages: testDropgz: "" clusterName: "overlaye2e" - - template: singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml - parameters: - name: "dualstackoverlay_e2e" - displayName: AKS DualStack Overlay - pipelineBuildImage: "$(BUILD_IMAGE)" - testDropgz: "" - clusterName: "dualstackoverlaye2e" - - template: singletenancy/aks-swift/e2e-job-template.yaml parameters: name: "aks_swift_e2e" diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml deleted file mode 100644 index fadb3196ab..0000000000 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-job-template.yaml +++ /dev/null @@ -1,32 +0,0 @@ -parameters: - name: "" - displayName: "" - pipelineBuildImage: "$(BUILD_IMAGE)" - testDropgz: "" - clusterName: "" - -stages: - - stage: ${{ parameters.name }} - displayName: E2E - ${{ parameters.displayName }} - dependsOn: - - setup - - publish - jobs: - - job: ${{ parameters.name }} - displayName: DualStack Overlay Test Suite - (${{ parameters.name }}) - timeoutInMinutes: 120 - pool: - name: $(BUILD_POOL_NAME_DEFAULT) - demands: - - Role -equals $(CUSTOM_E2E_ROLE) - variables: - GOPATH: "$(Agent.TempDirectory)/go" # Go workspace path - GOBIN: "$(GOPATH)/bin" # Go binaries path - modulePath: "$(GOPATH)/src/github.com/Azure/azure-container-networking" - steps: - - template: dualstackoverlay-e2e-step-template.yaml - parameters: - name: ${{ parameters.name }} - testDropgz: ${{ parameters.testDropgz }} - clusterName: ${{ parameters.clusterName }} - \ No newline at end of file diff --git a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml b/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml deleted file mode 100644 index 0ebc45882f..0000000000 --- a/.pipelines/singletenancy/dualstack-overlay/dualstackoverlay-e2e-step-template.yaml +++ /dev/null @@ -1,105 +0,0 @@ -parameters: - name: "" - testDropgz: "" - clusterName: "" - -steps: - - bash: | - echo $UID - sudo rm -rf $(System.DefaultWorkingDirectory)/* - displayName: "Set up OS environment" - - checkout: self - - - bash: | - go version - go env - mkdir 
-p '$(GOBIN)' - mkdir -p '$(GOPATH)/pkg' - mkdir -p '$(modulePath)' - echo '##vso[task.prependpath]$(GOBIN)' - echo '##vso[task.prependpath]$(GOROOT)/bin' - name: "GoEnv" - displayName: "Set up the Go environment" - - task: AzureCLI@2 - inputs: - azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) - scriptLocation: "inlineScript" - scriptType: "bash" - addSpnToEnvironment: true - inlineScript: | - echo "Check az version" - az version - echo "Install az cli extension preview" - az extension add --name aks-preview - az extension update --name aks-preview - mkdir -p ~/.kube/ - echo "Create AKS DualStack Overlay cluster" - make -C ./hack/aks azcfg AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) - make -C ./hack/aks dualstack-overlay-up AZCLI=az REGION=$(REGION_OVERLAY_CLUSTER_TEST) VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) - echo "Dualstack Overlay Cluster is successfully created" - displayName: Create DualStackOverlay cluster - condition: succeeded() - - - script: | - ls -lah - pwd - echo "installing kubectl" - curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - kubectl cluster-info - kubectl get node - kubectl get po -owide -A - retryCountOnTaskFailure: 3 - name: "installKubectl" - displayName: "Install kubectl on AKS dualstack overlay cluster" - - - script: | - cd test/integration/load - cd ../datapath - sudo go test -count=1 datapath_linux_test.go -timeout 1m -tags connection -run ^TestDatapathLinux$ -tags=connection,integration -isDualStack=true -v - name: "DualStack_Overlay_Linux_Datapath_tests" - displayName: "DualStack Overlay Linux Datapath Tests" - - # - task: AzureCLI@2 - # inputs: - # azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) - # scriptLocation: "inlineScript" - # scriptType: "bash" - # addSpnToEnvironment: true - # inlineScript: | - # make -C ./hack/aks dualstack-windows-byocni-up AZCLI=az VM_SIZE=$(VM_SIZE) NODE_COUNT=$(NODE_COUNT) SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) CLUSTER=${{ parameters.clusterName }}-$(make revision) - # echo "Windows node are successfully added to Dualstack Overlay Cluster" - # kubectl cluster-info - # kubectl get node - # kubectl get po -owide -A - # name: "Add_Windows_Node" - # displayName: "Add windows node on DualStackOverlay cluster" - - # - script: | - # pwd - # cd test/integration/load - # go test -timeout 30m -tags load -run ^TestLoad$ -tags=load -os=windows - # echo "DualStack Overlay Windows control plane CNS validation test" - # go test -timeout 30m -tags load -run ^TestDualStackProperties$ -tags=load -os=windows -cni cniv2 - # echo "DualStack Overlay Windows control plane Node properties test" - # go test -timeout 30m -tags load -cni cniv2 -run ^TestValidateState$ -tags=load -os=windows - # echo "DualStack Overlay Windows datapath test" - # cd ../datapath - # go test -count=1 datapath_windows_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection - # name: "DualStack_Overlay_Windows_tests" - # displayName: "DualStack Overlay Windows Tests" - - # - task: AzureCLI@2 - # inputs: - # azureSubscription: $(AZURE_TEST_AGENT_SERVICE_CONNECTION) - # scriptLocation: "inlineScript" - # scriptType: "bash" - # addSpnToEnvironment: true - # inlineScript: | - # echo "Deleting cluster" - # make -C ./hack/aks azcfg AZCLI=az - # make -C ./hack/aks down SUB=$(SUB_AZURE_NETWORK_AGENT_TEST) AZCLI=az 
CLUSTER=${{ parameters.clusterName }}-$(make revision) - # echo "Cluster and resources down" - # name: "CleanupDualStackOverlaycluster" - # displayName: "Cleanup DualStack Overlay Cluster" - # condition: always() \ No newline at end of file From 988c53001e9e13519a2ee5ec0c4318c7cda380af Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 09:07:13 -0400 Subject: [PATCH 28/36] put installdualstackoverlayp --- test/integration/setup_test.go | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/test/integration/setup_test.go b/test/integration/setup_test.go index ff67f7c784..5ece6e6498 100644 --- a/test/integration/setup_test.go +++ b/test/integration/setup_test.go @@ -17,14 +17,13 @@ import ( const ( exitFail = 1 - envTestDropgz = "TEST_DROPGZ" - envCNIDropgzVersion = "CNI_DROPGZ_VERSION" - envCNSVersion = "CNS_VERSION" - envInstallCNS = "INSTALL_CNS" - envInstallAzilium = "INSTALL_AZILIUM" - envInstallAzureVnet = "INSTALL_AZURE_VNET" - envInstallOverlay = "INSTALL_OVERLAY" - envInstallDualStackOverlay = "INSTALL_DUALSTACK_OVERLAY" + envTestDropgz = "TEST_DROPGZ" + envCNIDropgzVersion = "CNI_DROPGZ_VERSION" + envCNSVersion = "CNS_VERSION" + envInstallCNS = "INSTALL_CNS" + envInstallAzilium = "INSTALL_AZILIUM" + envInstallAzureVnet = "INSTALL_AZURE_VNET" + envInstallOverlay = "INSTALL_OVERLAY" // relative cns manifest paths cnsManifestFolder = "manifests/cns" @@ -161,19 +160,6 @@ func installCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, l log.Printf("Env %v not set to true, skipping", envInstallOverlay) } - if installBool4 := os.Getenv(envInstallDualStackOverlay); installBool4 != "" { - if dualStackOverlayScenario, err := strconv.ParseBool(installBool4); err == nil && dualStackOverlayScenario == true { - log.Printf("Env %v set to true, deploy azure-vnet", envInstallDualStackOverlay) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", "/opt/cni/bin/azure-vnet-ipam", "azure-swift-overlay-dualstack.conflist", "-o", "/etc/cni/net.d/10-azure.conflist"} - } - // setup the CNS swiftconfigmap - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsSwiftConfigMapPath); err != nil { - return nil, err - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallDualStackOverlay) - } - cnsDaemonsetClient := clientset.AppsV1().DaemonSets(cns.Namespace) log.Printf("Installing CNS with image %s", cns.Spec.Template.Spec.Containers[0].Image) From 3b3904e40f7960c67cdaa3cdc6f4b900f0077a74 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 12:42:26 -0400 Subject: [PATCH 29/36] fix comments --- hack/aks/Makefile | 28 ------------------- .../datapath/datapath_linux_test.go | 16 +++-------- 2 files changed, 4 insertions(+), 40 deletions(-) diff --git a/hack/aks/Makefile b/hack/aks/Makefile index f642b25669..14e2f1e0f7 100644 --- a/hack/aks/Makefile +++ b/hack/aks/Makefile @@ -214,34 +214,6 @@ windows-cniv1-up: rg-up overlay-net-up ## Bring up a Windows CNIv1 cluster @$(MAKE) set-kubeconf -dualstack-overlay-up: rg-up overlay-net-up ## Brings up an dualstack Overlay cluster with Linux node only - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --kubernetes-version 1.26.3 \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --network-plugin azure \ - --ip-families ipv4,ipv6 \ - --network-plugin-mode overlay \ - --aks-custom-headers 
AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --subscription $(SUB) \ - --no-ssh-key \ - --yes - @$(MAKE) set-kubeconf - -dualstack-overlay-byocni-up: rg-up overlay-net-up ## Brings up an dualstack Overlay BYO CNI cluster - $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ - --kubernetes-version 1.26.3 \ - --node-count $(NODE_COUNT) \ - --node-vm-size $(VM_SIZE) \ - --network-plugin none \ - --network-plugin-mode overlay \ - --aks-custom-headers AKSHTTPCustomFeatures=Microsoft.ContainerService/AzureOverlayDualStackPreview \ - --ip-families ipv4,ipv6 \ - --subscription $(SUB) \ - --no-ssh-key \ - --yes - @$(MAKE) set-kubeconf - linux-cniv1-up: rg-up overlay-net-up $(AZCLI) aks create -n $(CLUSTER) -g $(GROUP) -l $(REGION) \ --node-count $(NODE_COUNT) \ diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index be923aa514..ad8135b898 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -105,7 +105,7 @@ func setupLinuxEnvironment(t *testing.T) { daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) if err != nil { - t.Fatal(err) + require.NoError(t, err) } } else { deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) @@ -115,7 +115,7 @@ func setupLinuxEnvironment(t *testing.T) { daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) if err != nil { - t.Fatal(err) + require.NoError(t, err) } } @@ -123,7 +123,7 @@ func setupLinuxEnvironment(t *testing.T) { rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) if err != nil { t.Log(os.Getwd()) - t.Fatal(err) + require.NoError(t, err) } // Fields for overwritting existing deployment yaml. 
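For reference while reading the t.Fatal / require.NoError swaps in the hunks above: testify's require.NoError(t, err) calls t.FailNow when err is non-nil, so both forms abort the test immediately and the surrounding `if err != nil` guard is redundant. A minimal self-contained sketch; doSomething is a hypothetical stand-in for the parse and create helpers, not part of this patch series:

    package datapath_test

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // doSomething stands in for calls like MustParseDeployment or MustCreateDaemonset.
    func doSomething() error { return nil }

    func TestRequireNoError(t *testing.T) {
        err := doSomething()
        // When err is non-nil, require.NoError logs the error and halts this
        // test via t.FailNow, exactly like t.Fatal(err); when err is nil it is
        // a no-op. The `if err != nil { require.NoError(t, err) }` pattern seen
        // above therefore behaves the same as the bare call below.
        require.NoError(t, err)
    }
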
@@ -144,7 +144,7 @@ func setupLinuxEnvironment(t *testing.T) { daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) if err != nil { - t.Fatal(err) + require.NoError(t, err) } t.Cleanup(func() { @@ -184,14 +184,6 @@ func setupLinuxEnvironment(t *testing.T) { } } - errFlag := apierrors.IsAlreadyExists(err) - if errFlag { - if err := k8sutils.MustDeleteDaemonset(ctx, daemonsetClient, daemonset); err != nil { - require.NoError(t, err) - } - t.Fatal("delete all goldpinger hosts and pods under default namespace if there is any error") - } - t.Log("Linux test environment ready") } From d65b07144bd5d90283c56fedb5962995f9a97800 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 12:46:40 -0400 Subject: [PATCH 30/36] fix comments --- test/integration/datapath/datapath_linux_test.go | 9 +++++++++ test/internal/k8sutils/utils_create.go | 2 +- test/internal/k8sutils/utils_delete.go | 2 +- 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index ad8135b898..f2dd2d5e17 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -163,6 +163,15 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Waiting for pods to be running state") err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { + // delete goldpinger daemonset and deployment + if err := k8sutils.MustDeleteDaemonset(ctx, daemonsetClient, daemonset); err != nil { + require.NoError(t, err) + } + + if err := k8sutils.MustDeleteDeployment(ctx, deploymentsClient, deployment); err != nil { + require.NoError(t, err) + } + require.NoError(t, err) } diff --git a/test/internal/k8sutils/utils_create.go b/test/internal/k8sutils/utils_create.go index 069da82ca0..164a00d945 100644 --- a/test/internal/k8sutils/utils_create.go +++ b/test/internal/k8sutils/utils_create.go @@ -42,7 +42,7 @@ func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn } func MustCreateDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { - if err := mustDeleteDeployment(ctx, deployments, d); err != nil { + if err := MustDeleteDeployment(ctx, deployments, d); err != nil { return err } log.Printf("Creating Deployment %v", d.Name) diff --git a/test/internal/k8sutils/utils_delete.go b/test/internal/k8sutils/utils_delete.go index 846f15b2cf..f154ff04b9 100644 --- a/test/internal/k8sutils/utils_delete.go +++ b/test/internal/k8sutils/utils_delete.go @@ -32,7 +32,7 @@ func MustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn return nil } -func mustDeleteDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { +func MustDeleteDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { if err := deployments.Delete(ctx, d.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { return err From a4c3e4162d42e49619e25fe7d160679e32b61310 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 12:47:09 -0400 Subject: [PATCH 31/36] fix comments --- test/integration/datapath/datapath_linux_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 
f2dd2d5e17..ad8135b898 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -163,15 +163,6 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Waiting for pods to be running state") err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { - // delete goldpinger daemonset and deployment - if err := k8sutils.MustDeleteDaemonset(ctx, daemonsetClient, daemonset); err != nil { - require.NoError(t, err) - } - - if err := k8sutils.MustDeleteDeployment(ctx, deploymentsClient, deployment); err != nil { - require.NoError(t, err) - } - require.NoError(t, err) } From 50477d4ed54c54eac3cf5929f222658a5b7918c5 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 12:55:50 -0400 Subject: [PATCH 32/36] remove readme for dualstack --- hack/aks/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/hack/aks/README.md b/hack/aks/README.md index 1a2eeceae4..26a38bec0a 100644 --- a/hack/aks/README.md +++ b/hack/aks/README.md @@ -28,8 +28,6 @@ AKS Clusters swift-byocni-up Bring up a SWIFT BYO CNI cluster swift-cilium-up Bring up a SWIFT Cilium cluster swift-up Bring up a SWIFT AzCNI cluster - dualstack-overlay-up Brings up an dualstack overlay cluster - dualstack-overlay-byocni-up Brings up an dualstack overlay cluster without CNS and CNI installed windows-cniv1-up Bring up a Windows AzCNIv1 cluster down Delete the cluster vmss-restart Restart the nodes of the cluster From d828c04b93eb2ce095160969040e43aee63caea5 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 13:17:50 -0400 Subject: [PATCH 33/36] comment fix --- test/integration/datapath/datapath_linux_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index ad8135b898..6aa4a686b6 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -123,7 +123,7 @@ func setupLinuxEnvironment(t *testing.T) { rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) if err != nil { t.Log(os.Getwd()) - require.NoError(t, err) + t.Fatal(err) } // Fields for overwritting existing deployment yaml. 
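A note on the %+v verb used by the t.Fatalf call in the next hunk: these test files build errors with github.com/pkg/errors, and for such values %+v prints the message together with the stack trace captured at the errors.New or errors.Wrap call site, while plain %v prints only the message. A short sketch under that assumption:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        // errors.Wrap records the call stack, which only %+v will render.
        err := errors.Wrap(errors.New("pods still pending"), "waiting for pods")
        fmt.Printf("%v\n", err)  // waiting for pods: pods still pending
        fmt.Printf("%+v\n", err) // same message, followed by the recorded stack trace
    }
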
@@ -163,7 +163,7 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Waiting for pods to be running state") err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { - require.NoError(t, err) + t.Fatalf("Pods are not in running state due to %+v", err) } if *isDualStack { From 533e4ef737d0dfbb706634417dbcf65f13342556 Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 14:21:48 -0400 Subject: [PATCH 34/36] fix comments --- test/integration/datapath/datapath_linux_test.go | 2 +- test/internal/k8sutils/utils_create.go | 4 ++-- test/internal/k8sutils/utils_delete.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 6aa4a686b6..d320ad8c29 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -204,7 +204,7 @@ func TestDatapathLinux(t *testing.T) { t.Run("all pods have IPs assigned", func(t *testing.T) { err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { - require.NoError(t, err) + t.Fatalf("Pods are not in running state due to %+v", err) } t.Log("all pods have been allocated IPs") }) diff --git a/test/internal/k8sutils/utils_create.go b/test/internal/k8sutils/utils_create.go index 164a00d945..8d21af1035 100644 --- a/test/internal/k8sutils/utils_create.go +++ b/test/internal/k8sutils/utils_create.go @@ -30,7 +30,7 @@ func MustCreateOrUpdatePod(ctx context.Context, podI typedcorev1.PodInterface, p } func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { - if err := MustDeleteDaemonset(ctx, daemonsets, ds); err != nil { + if err := mustDeleteDaemonset(ctx, daemonsets, ds); err != nil { return err } log.Printf("Creating Daemonset %v", ds.Name) @@ -42,7 +42,7 @@ func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn } func MustCreateDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { - if err := MustDeleteDeployment(ctx, deployments, d); err != nil { + if err := mustDeleteDeployment(ctx, deployments, d); err != nil { return err } log.Printf("Creating Deployment %v", d.Name) diff --git a/test/internal/k8sutils/utils_delete.go b/test/internal/k8sutils/utils_delete.go index f154ff04b9..1032b406eb 100644 --- a/test/internal/k8sutils/utils_delete.go +++ b/test/internal/k8sutils/utils_delete.go @@ -22,7 +22,7 @@ func MustDeletePod(ctx context.Context, podI typedcorev1.PodInterface, pod corev return nil } -func MustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { +func mustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { if err := daemonsets.Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { return err @@ -32,7 +32,7 @@ func MustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetIn return nil } -func MustDeleteDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { +func mustDeleteDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { if err := deployments.Delete(ctx, d.Name, metav1.DeleteOptions{}); err != nil { if !apierrors.IsNotFound(err) { return err From 0c9d86fde66558cc0e616300a5eb547ad0f7f365 
Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 14:47:54 -0400 Subject: [PATCH 35/36] fix logs --- .../datapath/datapath_linux_test.go | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index d320ad8c29..1f06a7b45b 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -15,7 +15,6 @@ import ( k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" "github.com/Azure/azure-container-networking/test/internal/retry" "github.com/pkg/errors" - "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -78,7 +77,7 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Create Clientset") clientset, err := k8sutils.MustGetClientset() if err != nil { - require.NoError(t, err, "could not get k8s clientset: %v", err) + t.Fatalf("could not get k8s clientset: %v", err) } t.Log("Create Label Selectors") @@ -88,7 +87,7 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Get Nodes") nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) if err != nil { - require.NoError(t, err, "could not get k8s node list: %v", err) + t.Fatalf("could not get k8s node list: %v", err) } t.Log("Creating Linux pods through deployment") @@ -100,22 +99,22 @@ func setupLinuxEnvironment(t *testing.T) { if *isDualStack { deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6) if err != nil { - require.NoError(t, err) + t.Fatal(err) } daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) if err != nil { - require.NoError(t, err) + t.Fatal(err) } } else { deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) if err != nil { - require.NoError(t, err) + t.Fatal(err) } daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) if err != nil { - require.NoError(t, err) + t.Fatal(err) } } @@ -138,13 +137,13 @@ func setupLinuxEnvironment(t *testing.T) { deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) if err != nil { - require.NoError(t, err) + t.Fatal(err) } daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) if err != nil { - require.NoError(t, err) + t.Fatal(err) } t.Cleanup(func() { @@ -176,11 +175,11 @@ func setupLinuxEnvironment(t *testing.T) { for _, node := range nodes.Items { pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) if err != nil { - require.NoError(t, err, "could not get k8s clientset: %v", err) + t.Fatalf("could not get k8s clientset: %v", err) } if len(pods.Items) <= 1 { t.Logf("%s", node.Name) - require.NoError(t, errors.New("Less than 2 pods on node")) + t.Fatal("Less than 2 pods on node") } } From 345bac20b79e5cb6f5691fcd7d580dc3477855dc Mon Sep 17 00:00:00 2001 From: paulyufan2 Date: Wed, 26 Jul 2023 18:21:17 -0400 Subject: [PATCH 36/36] fix error --- .../datapath/datapath_linux_test.go | 1 - .../datapath/datapath_windows_test.go | 41 +++++++++++-------- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 1f06a7b45b..93116df1d6 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ 
From 345bac20b79e5cb6f5691fcd7d580dc3477855dc Mon Sep 17 00:00:00 2001
From: paulyufan2
Date: Wed, 26 Jul 2023 18:21:17 -0400
Subject: [PATCH 36/36] fix error

---
 .../datapath/datapath_linux_test.go   |  1 -
 .../datapath/datapath_windows_test.go | 41 +++++++++++--------
 2 files changed, 25 insertions(+), 17 deletions(-)

diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go
index 1f06a7b45b..93116df1d6 100644
--- a/test/integration/datapath/datapath_linux_test.go
+++ b/test/integration/datapath/datapath_linux_test.go
@@ -178,7 +178,6 @@ func setupLinuxEnvironment(t *testing.T) {
 			t.Fatalf("could not get k8s clientset: %v", err)
 		}
 		if len(pods.Items) <= 1 {
-			t.Logf("%s", node.Name)
 			t.Fatal("Less than 2 pods on node")
 		}
 	}
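One subtlety in the windows-test hunks that follow: several err = ... assignments become err := ... short declarations inside if blocks. In Go, := inside a block declares a new err scoped to that block, shadowing the function-level one, so the change compiles cleanly as long as nothing after the block reads the outer variable. A standalone sketch of the scoping rule, with illustrative values only:

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        err := errors.New("outer")

        if true {
            err := errors.New("inner") // new variable, shadows the outer err
            fmt.Println(err)           // prints "inner"
        }

        fmt.Println(err) // still "outer": the block-scoped err never escaped
    }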
diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go
index ae27ace2ce..9a060e9cec 100644
--- a/test/integration/datapath/datapath_windows_test.go
+++ b/test/integration/datapath/datapath_windows_test.go
@@ -52,7 +52,7 @@ func TestDatapathWin(t *testing.T) {
 	t.Log("Create Clientset")
 	clientset, err := k8sutils.MustGetClientset()
 	if err != nil {
-		require.NoError(t, err, "could not get k8s clientset: %v", err)
+		require.NoError(t, err)
 	}
 	t.Log("Get REST config")
 	restConfig := k8sutils.MustGetRestConfig(t)
@@ -64,21 +64,21 @@ func TestDatapathWin(t *testing.T) {
 	t.Log("Get Nodes")
 	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
 	if err != nil {
-		require.NoError(t, err, "could not get k8s node list: %v", err)
+		require.NoError(t, err)
 	}
 
 	// Create namespace if it doesn't exist
 	namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace)
 	if err != nil {
-		require.NoError(t, err, "failed to check if namespace %s exists due to: %v", *podNamespace, err)
+		t.Fatalf("failed to check if namespace %s exists due to: %v", *podNamespace, err)
 	}
 
 	if !namespaceExists {
 		// Test Namespace
 		t.Log("Create Namespace")
-		err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
+		err := k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
 		if err != nil {
-			require.NoError(t, err, "failed to create pod namespace %s due to: %v", *podNamespace, err)
+			t.Fatalf("failed to create pod namespace %s due to: %v", *podNamespace, err)
 		}
 
 		t.Log("Creating Windows pods through deployment")
@@ -102,7 +102,7 @@ func TestDatapathWin(t *testing.T) {
 		}
 
 		t.Log("Waiting for pods to be running state")
-		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
 		if err != nil {
 			require.NoError(t, err)
 		}
@@ -112,7 +112,7 @@ func TestDatapathWin(t *testing.T) {
 		t.Log("Namespace already exists")
 
 		t.Log("Checking for pods to be running state")
-		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
 		if err != nil {
 			require.NoError(t, err)
 		}
@@ -123,10 +123,9 @@ func TestDatapathWin(t *testing.T) {
 		pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
 		if err != nil {
-			require.NoError(t, err, "could not get k8s clientset: %v", err)
+			require.NoError(t, err)
 		}
 		if len(pods.Items) <= 1 {
-			t.Logf("%s", node.Name)
 			require.NoError(t, errors.New("Less than 2 pods on node"))
 		}
 	}
@@ -149,13 +148,17 @@ func TestDatapathWin(t *testing.T) {
 			}
 
 			err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIP, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to node, ping test failed with: %+v", err)
+			if err != nil {
+				require.NoError(t, err)
+			}
 			t.Logf("Windows pod to node, passed for node: %s", node.Name)
 
 			// windows ipv6 connectivity
 			if nodeIPv6 != "" {
-				err = datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIPv6, *podNamespace, podLabelSelector, restConfig)
-				require.NoError(t, err, "Windows pod to node, ipv6 ping test failed with: %+v", err)
+				err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIPv6, *podNamespace, podLabelSelector, restConfig)
+				if err != nil {
+					require.NoError(t, err)
+				}
 				t.Logf("Windows pod to node via ipv6, passed for node: %s", node.Name)
 			}
 		}
@@ -167,7 +170,9 @@ func TestDatapathWin(t *testing.T) {
 		if node.Status.NodeInfo.OperatingSystem == string(apiv1.Windows) {
 			t.Log("Windows ping tests (2) - Same Node")
 			err := datapath.WindowsPodToPodPingTestSameNode(ctx, clientset, node.Name, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to pod, same node, ping test failed with: %+v", err)
+			if err != nil {
+				require.NoError(t, err)
+			}
 			t.Logf("Windows pod to windows pod, same node, passed for node: %s", node.ObjectMeta.Name)
 		}
 	}
@@ -177,8 +182,10 @@ func TestDatapathWin(t *testing.T) {
 		t.Log("Windows ping tests (2) - Different Node")
 		firstNode := nodes.Items[i%2].Name
 		secondNode := nodes.Items[(i+1)%2].Name
-		err = datapath.WindowsPodToPodPingTestDiffNode(ctx, clientset, firstNode, secondNode, *podNamespace, podLabelSelector, restConfig)
-		require.NoError(t, err, "Windows pod to pod, different node, ping test failed with: %+v", err)
+		err := datapath.WindowsPodToPodPingTestDiffNode(ctx, clientset, firstNode, secondNode, *podNamespace, podLabelSelector, restConfig)
+		if err != nil {
+			require.NoError(t, err)
+		}
 		t.Logf("Windows pod to windows pod, different node, passed for node: %s -> %s", firstNode, secondNode)
 	}
 
@@ -190,7 +197,9 @@ func TestDatapathWin(t *testing.T) {
 		if node.Status.NodeInfo.OperatingSystem == string(apiv1.Windows) {
 			t.Log("Windows ping tests (3) - Pod to Internet tests")
 			err := datapath.WindowsPodToInternet(ctx, clientset, node.Name, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to internet test failed with: %+v", err)
+			if err != nil {
+				require.NoError(t, err)
+			}
 			t.Logf("Windows pod to Internet url tests")
 		}
 	}
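A closing note on the different-node case above: the i%2 / (i+1)%2 indexing only ever touches the first two entries of the node list, running the ping once in each direction (node 0 to node 1, then node 1 to node 0). A standalone sketch of the pairing; the node names are made up, the loop header is assumed to run two iterations since it is not visible in the hunk, and the real test calls WindowsPodToPodPingTestDiffNode instead of printing:

    package main

    import "fmt"

    func main() {
        // The test assumes at least two Windows nodes; only the first two are paired.
        nodes := []string{"akswin000000", "akswin000001"}

        for i := 0; i < 2; i++ {
            firstNode := nodes[i%2]
            secondNode := nodes[(i+1)%2]
            fmt.Printf("ping: %s -> %s\n", firstNode, secondNode)
        }
        // Output:
        // ping: akswin000000 -> akswin000001
        // ping: akswin000001 -> akswin000000
    }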