diff --git a/hack/aks/README.md b/hack/aks/README.md
index 3a4e80e4f0..26a38bec0a 100644
--- a/hack/aks/README.md
+++ b/hack/aks/README.md
@@ -21,14 +21,14 @@ SWIFT Infra
   net-up              Create required swift vnet/subnets

 AKS Clusters
-  byocni-up           Alias to swift-byocni-up
-  cilium-up           Alias to swift-cilium-up
-  up                  Alias to swift-up
-  overlay-up          Brings up an Overlay AzCNI cluster
-  swift-byocni-up     Bring up a SWIFT BYO CNI cluster
-  swift-cilium-up     Bring up a SWIFT Cilium cluster
-  swift-up            Bring up a SWIFT AzCNI cluster
-  windows-cniv1-up    Bring up a Windows AzCNIv1 cluster
-  down                Delete the cluster
-  vmss-restart        Restart the nodes of the cluster
+  byocni-up             Alias to swift-byocni-up
+  cilium-up             Alias to swift-cilium-up
+  up                    Alias to swift-up
+  overlay-up            Bring up an Overlay AzCNI cluster
+  swift-byocni-up       Bring up a SWIFT BYO CNI cluster
+  swift-cilium-up       Bring up a SWIFT Cilium cluster
+  swift-up              Bring up a SWIFT AzCNI cluster
+  windows-cniv1-up      Bring up a Windows AzCNIv1 cluster
+  down                  Delete the cluster
+  vmss-restart          Restart the nodes of the cluster
 ```
diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go
new file mode 100644
index 0000000000..93116df1d6
--- /dev/null
+++ b/test/integration/datapath/datapath_linux_test.go
@@ -0,0 +1,297 @@
+//go:build connection
+
+package connection
+
+import (
+	"context"
+	"flag"
+	"net"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/Azure/azure-container-networking/test/integration"
+	"github.com/Azure/azure-container-networking/test/integration/goldpinger"
+	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
+	"github.com/Azure/azure-container-networking/test/internal/retry"
+	"github.com/pkg/errors"
+
+	appsv1 "k8s.io/api/apps/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+	podLabelKey                = "app"
+	podCount                   = 2
+	nodepoolKey                = "agentpool"
+	LinuxDeployIPV4            = "../manifests/datapath/linux-deployment.yaml"
+	LinuxDeployIPv6            = "../manifests/datapath/linux-deployment-ipv6.yaml"
+	maxRetryDelaySeconds       = 10
+	defaultTimeoutSeconds      = 120
+	defaultRetryDelaySeconds   = 1
+	goldpingerRetryCount       = 24
+	goldpingerDelayTimeSeconds = 5
+	gpFolder                   = "../manifests/goldpinger"
+	gpClusterRolePath          = gpFolder + "/cluster-role.yaml"
+	gpClusterRoleBindingPath   = gpFolder + "/cluster-role-binding.yaml"
+	gpServiceAccountPath       = gpFolder + "/service-account.yaml"
+	gpDaemonset                = gpFolder + "/daemonset.yaml"
+	gpDaemonsetIPv6            = gpFolder + "/daemonset-ipv6.yaml"
+	gpDeployment               = gpFolder + "/deployment.yaml"
+)
+
+var (
+	podPrefix        = flag.String("podName", "goldpinger", "Prefix for test pods")
+	podNamespace     = flag.String("namespace", "default", "Namespace for test pods")
+	nodepoolSelector = flag.String("nodepoolSelector", "nodepool1", "Provides nodepool as a Linux Node-Selector for pods")
+	// TODO: add flag to support dual nic scenario
+	isDualStack    = flag.Bool("isDualStack", false, "whether system supports dualstack scenario")
+	defaultRetrier = retry.Retrier{
+		Attempts: 10,
+		Delay:    defaultRetryDelaySeconds * time.Second,
+	}
+)
+
+/*
+This test assumes that you have the current credentials loaded in your default kubeconfig for a
+k8s cluster with a Linux nodepool consisting of at least 2 Linux nodes.
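+A cluster like this can be brought up with the AKS targets in hack/aks (see the
+*-up targets in hack/aks/README.md).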
+*** The expected nodepool name is nodepool1; if the nodepool has a different name, override nodepoolSelector with:
+	-nodepoolSelector="yournodepoolname"
+
+To run the test use one of the following commands:
+go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -run ^TestDatapathLinux$ -tags=connection,integration
+	or
+go test -count=1 test/integration/datapath/datapath_linux_test.go -timeout 3m -run ^TestDatapathLinux$ -podName=acnpod -nodepoolSelector=aks-pool1 -tags=connection,integration
+
+
+This test checks pod to pod, pod to node, and pod to internet for datapath connectivity.
+
+The test timeout is controlled by the -timeout flag.
+
+*/
+
+func setupLinuxEnvironment(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Create Clientset")
+	clientset, err := k8sutils.MustGetClientset()
+	if err != nil {
+		t.Fatalf("could not get k8s clientset: %v", err)
+	}
+
+	t.Log("Create Label Selectors")
+	podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)
+	nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector)
+
+	t.Log("Get Nodes")
+	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
+	if err != nil {
+		t.Fatalf("could not get k8s node list: %v", err)
+	}
+
+	t.Log("Creating Linux pods through deployment")
+
+	// run goldpinger ipv4 and ipv6 test cases separately
+	var daemonset appsv1.DaemonSet
+	var deployment appsv1.Deployment
+
+	if *isDualStack {
+		deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6)
+		if err != nil {
+			t.Fatal(err)
+		}
+	} else {
+		deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset)
+		if err != nil {
+			t.Fatal(err)
+		}
+	}
+
+	// set up common RBAC: ClusterRole, ClusterRoleBinding, ServiceAccount
+	rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath)
+	if err != nil {
+		t.Log(os.Getwd())
+		t.Fatal(err)
+	}
+
+	// Fields for overwriting the existing deployment yaml.
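+	// (each flag value replaces the matching field below; e.g. -podName=acnpod
+	// renames the deployment and relabels its pods so that podLabelSelector
+	// above still matches them)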
+	// Defaults from flags will not change anything
+	deployment.Spec.Selector.MatchLabels[podLabelKey] = *podPrefix
+	deployment.Spec.Template.ObjectMeta.Labels[podLabelKey] = *podPrefix
+	deployment.Spec.Template.Spec.NodeSelector[nodepoolKey] = *nodepoolSelector
+	deployment.Name = *podPrefix
+	deployment.Namespace = *podNamespace
+	daemonset.Namespace = *podNamespace
+
+	deploymentsClient := clientset.AppsV1().Deployments(*podNamespace)
+	err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace)
+	err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	t.Cleanup(func() {
+		t.Log("cleaning up resources")
+		rbacSetupFn()
+
+		if err := deploymentsClient.Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil {
+			t.Log(err)
+		}
+
+		if err := daemonsetClient.Delete(ctx, daemonset.Name, metav1.DeleteOptions{}); err != nil {
+			t.Log(err)
+		}
+	})
+
+	t.Log("Waiting for pods to reach Running state")
+	err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+	if err != nil {
+		t.Fatalf("Pods are not in Running state due to %+v", err)
+	}
+
+	if *isDualStack {
+		t.Log("Successfully created customer dualstack Linux pods")
+	} else {
+		t.Log("Successfully created customer singlestack Linux pods")
+	}
+
+	t.Log("Checking Linux test environment")
+	for _, node := range nodes.Items {
+		pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
+		if err != nil {
+			t.Fatalf("could not get pods on node: %v", err)
+		}
+		if len(pods.Items) <= 1 {
+			t.Fatal("fewer than 2 pods on node")
+		}
+	}
+
+	t.Log("Linux test environment ready")
+}
+
+func TestDatapathLinux(t *testing.T) {
+	ctx := context.Background()
+
+	t.Log("Get REST config")
+	restConfig := k8sutils.MustGetRestConfig(t)
+
+	t.Log("Create Clientset")
+	clientset, err := k8sutils.MustGetClientset()
+	if err != nil {
+		t.Fatalf("could not get k8s clientset: %v", err)
+	}
+
+	setupLinuxEnvironment(t)
+	podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)
+
+	t.Run("Linux ping tests", func(t *testing.T) {
+		// Check goldpinger health
+		t.Run("all pods have IPs assigned", func(t *testing.T) {
+			err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+			if err != nil {
+				t.Fatalf("Pods are not in Running state due to %+v", err)
+			}
+			t.Log("all pods have been allocated IPs")
+		})
+
+		if *isDualStack {
+			t.Run("Linux dualstack overlay tests", func(t *testing.T) {
+				t.Run("test dualstack overlay", func(t *testing.T) {
+					podsClient := clientset.CoreV1().Pods(*podNamespace)
+
+					checkPodIPsFn := func() error {
+						podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: "app=goldpinger"})
+						if err != nil {
+							return err
+						}
+
+						for _, pod := range podList.Items {
+							podIPs := pod.Status.PodIPs
+							if len(podIPs) < 2 {
+								return errors.New("pod does not have two IPs")
+							}
+							// PodIPs are expected to be ordered ipv4 first, then ipv6
+							ipv4 := net.ParseIP(podIPs[0].IP)
+							ipv6 := net.ParseIP(podIPs[1].IP)
+							if ipv4.To4() == nil || ipv6 == nil || ipv6.To4() != nil {
+								return errors.New("pod does not have both an ipv4 and an ipv6 address")
+							}
+						}
+						return nil
+					}
+					err := defaultRetrier.Do(ctx, checkPodIPsFn)
+					if err != nil {
+						t.Fatalf("dualstack overlay pod properties check failed due to: %v", err)
+					}
+
+					t.Log("all dualstack Linux pod properties have been verified")
+				})
+			})
+		}
+
+		t.Run("all linux pods can ping each other", func(t *testing.T) {
+			clusterCheckCtx, cancel := context.WithTimeout(ctx, 3*time.Minute)
+			defer cancel()
+
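+			// Reach goldpinger through a local port-forward (localhost:9090 -> pod 8080)
+			// so the CheckAll query below only needs kubeconfig access to the cluster.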
+			pfOpts := k8s.PortForwardingOpts{
+				Namespace:     *podNamespace,
+				LabelSelector: podLabelSelector,
+				LocalPort:     9090,
+				DestPort:      8080,
+			}
+
+			pf, err := k8s.NewPortForwarder(restConfig, t, pfOpts)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			portForwardCtx, cancel := context.WithTimeout(ctx, defaultTimeoutSeconds*time.Second)
+			defer cancel()
+
+			portForwardFn := func() error {
+				err := pf.Forward(portForwardCtx)
+				if err != nil {
+					t.Logf("unable to start port forward: %v", err)
+					return err
+				}
+				return nil
+			}
+
+			if err := defaultRetrier.Do(portForwardCtx, portForwardFn); err != nil {
+				t.Fatalf("could not start port forward within %ds: %v", defaultTimeoutSeconds, err)
+			}
+			defer pf.Stop()
+
+			gpClient := goldpinger.Client{Host: pf.Address()}
+			clusterCheckFn := func() error {
+				clusterState, err := gpClient.CheckAll(clusterCheckCtx)
+				if err != nil {
+					return err
+				}
+				stats := goldpinger.ClusterStats(clusterState)
+				stats.PrintStats()
+				if stats.AllPingsHealthy() {
+					return nil
+				}
+
+				return errors.New("not all pings are healthy")
+			}
+			retrier := retry.Retrier{Attempts: goldpingerRetryCount, Delay: goldpingerDelayTimeSeconds * time.Second}
+			if err := retrier.Do(clusterCheckCtx, clusterCheckFn); err != nil {
+				t.Fatalf("goldpinger pods network health could not reach healthy state after %d seconds: %v", goldpingerRetryCount*goldpingerDelayTimeSeconds, err)
+			}
+
+			t.Log("all pings successful!")
+		})
+	})
+}
diff --git a/test/integration/datapath/datapath_win_test.go b/test/integration/datapath/datapath_windows_test.go
similarity index 68%
rename from test/integration/datapath/datapath_win_test.go
rename to test/integration/datapath/datapath_windows_test.go
index 054a60bb98..9a060e9cec 100644
--- a/test/integration/datapath/datapath_win_test.go
+++ b/test/integration/datapath/datapath_windows_test.go
@@ -5,7 +5,7 @@ package connection
 import (
 	"context"
 	"flag"
-	"fmt"
+	"net"
 	"testing"
 
 	"github.com/Azure/azure-container-networking/test/internal/datapath"
@@ -13,7 +13,6 @@ import (
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/require"
 	apiv1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
 )
 
 const (
@@ -25,8 +24,8 @@ const (
 
 var (
 	podPrefix        = flag.String("podName", "datapod", "Prefix for test pods")
-	podNamespace     = flag.String("namespace", "datapath-win", "Namespace for test pods")
-	nodepoolSelector = flag.String("nodepoolSelector", "npwin", "Provides nodepool as a Node-Selector for pods")
+	podNamespace     = flag.String("namespace", "windows-datapath-test", "Namespace for test pods")
+	nodepoolSelector = flag.String("nodepoolSelector", "npwin", "Provides nodepool as a Windows node selector for pods")
 )
 
 /*
@@ -36,9 +35,9 @@ k8s cluster with a windows nodepool consisting of at least 2 windows nodes.
 	-nodepoolSelector="yournodepoolname"
 
 To run the test use one of the following commands:
-go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -tags=connection
+go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -run ^TestDatapathWin$ -tags=connection
 	or
-go test -count=1 test/integration/datapath/datapath_win_test.go -timeout 3m -tags connection -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection
+go test -count=1 test/integration/datapath/datapath_windows_test.go -timeout 3m -run ^TestDatapathWin$ -podName=acnpod -nodepoolSelector=npwina -tags=connection
 
 This test checks pod to pod, pod to node, and pod to internet for datapath connectivity.
@@ -53,27 +52,35 @@ func TestDatapathWin(t *testing.T) {
 	t.Log("Create Clientset")
 	clientset, err := k8sutils.MustGetClientset()
 	if err != nil {
-		require.NoError(t, err, "could not get k8s clientset: %v", err)
+		require.NoError(t, err)
 	}
 
 	t.Log("Get REST config")
 	restConfig := k8sutils.MustGetRestConfig(t)
 
 	t.Log("Create Label Selectors")
-	podLabelSelector := fmt.Sprintf("%s=%s", podLabelKey, *podPrefix)
-	nodeLabelSelector := fmt.Sprintf("%s=%s", nodepoolKey, *nodepoolSelector)
+	podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix)
+	nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector)
 
 	t.Log("Get Nodes")
 	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector)
 	if err != nil {
-		require.NoError(t, err, "could not get k8s node list: %v", err)
+		require.NoError(t, err)
 	}
 
-	// Test Namespace
-	t.Log("Create Namespace")
-	err = k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
-	createPodFlag := !(apierrors.IsAlreadyExists(err))
+	// Create namespace if it doesn't exist
+	namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace)
+	if err != nil {
+		t.Fatalf("failed to check if namespace %s exists due to: %v", *podNamespace, err)
+	}
+
+	if !namespaceExists {
+		// Test Namespace
+		t.Log("Create Namespace")
+		err := k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace)
+		if err != nil {
+			t.Fatalf("failed to create pod namespace %s due to: %v", *podNamespace, err)
+		}
 
-	if createPodFlag {
 		t.Log("Creating Windows pods through deployment")
 		deployment, err := k8sutils.MustParseDeployment(WindowsDeployYamlPath)
 		if err != nil {
@@ -95,7 +102,7 @@ func TestDatapathWin(t *testing.T) {
 		}
 
 		t.Log("Waiting for pods to be running state")
-		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
 		if err != nil {
 			require.NoError(t, err)
 		}
@@ -105,20 +112,20 @@ func TestDatapathWin(t *testing.T) {
 		t.Log("Namespace already exists")
 
 		t.Log("Checking for pods to be running state")
-		err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
+		err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector)
 		if err != nil {
 			require.NoError(t, err)
 		}
 	}
-	t.Log("Checking Windows test environment ")
+
+	t.Log("Checking Windows test environment")
 	for _, node := range nodes.Items {
 		pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name)
 		if err != nil {
-			require.NoError(t, err, "could not get k8s clientset: %v", err)
+			require.NoError(t, err)
 		}
 		if len(pods.Items) <= 1 {
-			t.Logf("%s", node.Name)
 			require.NoError(t, 
errors.New("Less than 2 pods on node"))
 		}
 	}
 
@@ -129,18 +136,31 @@ func TestDatapathWin(t *testing.T) {
 		for _, node := range nodes.Items {
 			t.Log("Windows ping tests (1)")
 			nodeIP := ""
+			nodeIPv6 := ""
 			for _, address := range node.Status.Addresses {
 				if address.Type == "InternalIP" {
 					nodeIP = address.Address
-					// Multiple addresses exist, break once Internal IP found.
-					// Cannot call directly
+					// an InternalIP that does not parse as ipv4 is the node's ipv6 address
+					if net.ParseIP(address.Address).To4() == nil {
+						nodeIPv6 = address.Address
+					}
 					break
 				}
 			}
 
 			err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIP, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to node, ping test failed with: %+v", err)
+			require.NoError(t, err)
 			t.Logf("Windows pod to node, passed for node: %s", node.Name)
+
+			// windows ipv6 connectivity
+			if nodeIPv6 != "" {
+				err := datapath.WindowsPodToNode(ctx, clientset, node.Name, nodeIPv6, *podNamespace, podLabelSelector, restConfig)
+				require.NoError(t, err)
+				t.Logf("Windows pod to node via ipv6, passed for node: %s", node.Name)
+			}
 		}
 	})
 
@@ -150,7 +170,9 @@ func TestDatapathWin(t *testing.T) {
 		if node.Status.NodeInfo.OperatingSystem == string(apiv1.Windows) {
 			t.Log("Windows ping tests (2) - Same Node")
 			err := datapath.WindowsPodToPodPingTestSameNode(ctx, clientset, node.Name, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to pod, same node, ping test failed with: %+v", err)
+			require.NoError(t, err)
 			t.Logf("Windows pod to windows pod, same node, passed for node: %s", node.ObjectMeta.Name)
 		}
 	}
@@ -160,8 +182,10 @@ func TestDatapathWin(t *testing.T) {
 		t.Log("Windows ping tests (2) - Different Node")
 		firstNode := nodes.Items[i%2].Name
 		secondNode := nodes.Items[(i+1)%2].Name
-		err = datapath.WindowsPodToPodPingTestDiffNode(ctx, clientset, firstNode, secondNode, *podNamespace, podLabelSelector, restConfig)
-		require.NoError(t, err, "Windows pod to pod, different node, ping test failed with: %+v", err)
+		err := datapath.WindowsPodToPodPingTestDiffNode(ctx, clientset, firstNode, secondNode, *podNamespace, podLabelSelector, restConfig)
+		require.NoError(t, err)
 		t.Logf("Windows pod to windows pod, different node, passed for node: %s -> %s", firstNode, secondNode)
 	}
 
@@ -173,7 +197,9 @@ func TestDatapathWin(t *testing.T) {
 		if node.Status.NodeInfo.OperatingSystem == string(apiv1.Windows) {
 			t.Log("Windows ping tests (3) - Pod to Internet tests")
 			err := datapath.WindowsPodToInternet(ctx, clientset, node.Name, *podNamespace, podLabelSelector, restConfig)
-			require.NoError(t, err, "Windows pod to internet test failed with: %+v", err)
+			require.NoError(t, err)
 			t.Logf("Windows pod to Internet url tests")
 		}
 	}
diff --git a/test/integration/goldpinger/client.go b/test/integration/goldpinger/client.go
index 49b29d9686..dac4149ced 100644
--- a/test/integration/goldpinger/client.go
+++ b/test/integration/goldpinger/client.go
@@ -1,3 +1,4 @@
+//go:build integration
 // +build integration
 
 package goldpinger
diff --git a/test/integration/manifests/datapath/linux-deployment-ipv6.yaml b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml
new file mode 100644
index 0000000000..e20de1df91
--- /dev/null
+++ b/test/integration/manifests/datapath/linux-deployment-ipv6.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: goldpinger-deploy
+  namespace: linux-datapath-test
+spec:
+  replicas: 4
+  selector:
+    matchLabels:
+      app: goldpinger
+  template:
+ metadata: + labels: + app: goldpinger + spec: + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: IP_VERSIONS + value: "6" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "2001:4860:4860::8888 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/integration/manifests/datapath/linux-deployment.yaml b/test/integration/manifests/datapath/linux-deployment.yaml new file mode 100644 index 0000000000..1e4d3433cc --- /dev/null +++ b/test/integration/manifests/datapath/linux-deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: goldpinger-deploy + namespace: linux-datapath-test +spec: + replicas: 4 + selector: + matchLabels: + app: goldpinger + template: + metadata: + labels: + app: goldpinger + spec: + containers: + - name: goldpinger + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: DNS_TARGETS_TIMEOUT + value: "10s" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "1.1.1.1 8.8.8.8 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + serviceAccount: goldpinger-serviceaccount + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + securityContext: + allowPrivilegeEscalation: false + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + resources: + limits: + memory: 80Mi + requests: + cpu: 1m + memory: 40Mi + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + nodeSelector: + kubernetes.io/os: linux \ No newline at end of file diff --git a/test/integration/manifests/goldpinger/cluster-role-binding.yaml 
b/test/integration/manifests/goldpinger/cluster-role-binding.yaml index c7c22e9bb3..e18b186a12 100644 --- a/test/integration/manifests/goldpinger/cluster-role-binding.yaml +++ b/test/integration/manifests/goldpinger/cluster-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: goldpinger-clusterrolebinding diff --git a/test/integration/manifests/goldpinger/daemonset-ipv6.yaml b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml new file mode 100644 index 0000000000..f2eaa0de03 --- /dev/null +++ b/test/integration/manifests/goldpinger/daemonset-ipv6.yaml @@ -0,0 +1,82 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: goldpinger-host + namespace: default +spec: + selector: + matchLabels: + app: goldpinger + type: goldpinger-host + template: + metadata: + labels: + app: goldpinger + type: goldpinger-host + spec: + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Exists + hostNetwork: true + serviceAccount: "goldpinger-serviceaccount" + securityContext: + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: "app" + operator: In + values: + - goldpinger + topologyKey: "kubernetes.io/hostname" + containers: + - name: goldpinger-vm + env: + - name: HOST + value: "0.0.0.0" + - name: PORT + value: "8080" + - name: PING_TIMEOUT + value: "10s" + - name: CHECK_TIMEOUT + value: "20s" + - name: CHECK_ALL_TIMEOUT + value: "20s" + - name: IP_VERSIONS + value: "6" + - name: HOSTNAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: HOSTS_TO_RESOLVE + value: "2001:4860:4860::8888 www.bing.com" + image: "docker.io/bloomberg/goldpinger:v3.7.0" + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ports: + - containerPort: 8080 + name: http + readinessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + initialDelaySeconds: 5 + periodSeconds: 5 \ No newline at end of file diff --git a/test/integration/manifests/goldpinger/daemonset.yaml b/test/integration/manifests/goldpinger/daemonset.yaml index ba77fa58ae..41a86cc3f0 100644 --- a/test/integration/manifests/goldpinger/daemonset.yaml +++ b/test/integration/manifests/goldpinger/daemonset.yaml @@ -77,4 +77,4 @@ spec: path: /healthz port: 8080 initialDelaySeconds: 5 - periodSeconds: 5 + periodSeconds: 5 \ No newline at end of file diff --git a/test/integration/manifests/load/privileged-daemonset.yaml b/test/integration/manifests/load/privileged-daemonset.yaml index 9bacdc4ebe..6448f56980 100644 --- a/test/integration/manifests/load/privileged-daemonset.yaml +++ b/test/integration/manifests/load/privileged-daemonset.yaml @@ -26,13 +26,25 @@ spec: volumeMounts: - mountPath: /var/run/azure-cns name: azure-cns + - mountPath: /var/run/azure-network + name: azure-network - mountPath: /host name: host-root + - mountPath: /var/run + name: azure-cns-noncilium volumes: - name: azure-cns hostPath: path: /var/run/azure-cns + - name: azure-network + hostPath: + path: /var/run/azure-network + - name: azure-cns-noncilium + hostPath: + path: /var/run - hostPath: path: / type: "" name: host-root + nodeSelector: + kubernetes.io/os: linux \ No newline at 
end of file
diff --git a/test/integration/manifests/noop-deployment-linux.yaml b/test/integration/manifests/noop-deployment-linux.yaml
index 6b12793189..4d4acd89c2 100644
--- a/test/integration/manifests/noop-deployment-linux.yaml
+++ b/test/integration/manifests/noop-deployment-linux.yaml
@@ -20,4 +20,4 @@ spec:
       securityContext:
         privileged: true
       nodeSelector:
-        "kubernetes.io/os": linux
+        kubernetes.io/os: linux
diff --git a/test/integration/manifests/noop-deployment-windows.yaml b/test/integration/manifests/noop-deployment-windows.yaml
index 3b35f044dc..7d6f5ef035 100644
--- a/test/integration/manifests/noop-deployment-windows.yaml
+++ b/test/integration/manifests/noop-deployment-windows.yaml
@@ -20,4 +20,4 @@ spec:
         ports:
         - containerPort: 80
       nodeSelector:
-        "kubernetes.io/os": windows
+        kubernetes.io/os: windows
diff --git a/test/internal/datapath/datapath_win.go b/test/internal/datapath/datapath_win.go
index 54a317760b..d59bb53f69 100644
--- a/test/internal/datapath/datapath_win.go
+++ b/test/internal/datapath/datapath_win.go
@@ -3,6 +3,7 @@ package datapath
 import (
 	"context"
 	"fmt"
+	"net"
 	"strings"
 
 	"github.com/Azure/azure-container-networking/test/internal/k8sutils"
@@ -14,6 +15,8 @@ import (
 	restclient "k8s.io/client-go/rest"
 )
 
+// curl www.bing.com over ipv6 to verify outbound ipv6 connectivity from a pod
+var ipv6PrefixPolicy = []string{"curl", "-6", "-I", "-v", "www.bing.com"}
+
 func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error {
 	logrus.Infof("podTest() - %v %v", srcPod.Name, cmd)
 	output, err := k8sutils.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc)
@@ -48,8 +51,27 @@ func WindowsPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes.
 	}
 	logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP)
 
+	// ipv4 ping test
 	// Ping the second pod from the first pod
-	return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows)
+	resultOne := podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows)
+	if resultOne != nil {
+		return resultOne
+	}
+
+	// ipv6 ping test
+	// ping the second pod's ipv6 address from the first pod
+	if len(secondPod.Status.PodIPs) > 1 {
+		for _, ip := range secondPod.Status.PodIPs {
+			// an address that does not parse as ipv4 is the pod's ipv6 address
+			if net.ParseIP(ip.IP).To4() == nil {
+				resultTwo := podTest(ctx, clientset, firstPod, []string{"ping", ip.IP}, rc, pingPassedWindows)
+				if resultTwo != nil {
+					return resultTwo
+				}
+			}
+		}
+	}
+
+	return nil
 }
 
 func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName1, nodeName2, podNamespace, labelSelector string, rc *restclient.Config) error {
@@ -80,7 +102,23 @@ func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.
 	logrus.Infof("Second pod: %v %v", secondPod.Name, secondPod.Status.PodIP)
 
 	// Ping the second pod from the first pod located on different nodes
-	return podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows)
+	resultOne := podTest(ctx, clientset, firstPod, []string{"ping", secondPod.Status.PodIP}, rc, pingPassedWindows)
+	if resultOne != nil {
+		return resultOne
+	}
+
+	// ping the second pod's ipv6 address from the first pod
+	if len(secondPod.Status.PodIPs) > 1 {
+		for _, ip := range secondPod.Status.PodIPs {
+			// an address that does not parse as ipv4 is the pod's ipv6 address
+			if net.ParseIP(ip.IP).To4() == nil {
+				resultTwo := podTest(ctx, clientset, firstPod, []string{"ping", ip.IP}, rc, pingPassedWindows)
+				if resultTwo != nil {
+					return resultTwo
+				}
+			}
+		}
+	}
+
+	return nil
 }
 
 func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, nodeIP, podNamespace, labelSelector string, rc *restclient.Config) error {
@@ -158,6 +196,21 @@ func WindowsPodToInternet(ctx context.Context, clientset *kubernetes.Clientset,
 		return resultTwo
 	}
 
+	// curl a URL over ipv6 from one pod
+	// command: C:\inetpub\wwwroot>curl -6 -I -v www.bing.com
+	// expected: * Trying [2620:1ec:c11::200]:80...
+	//           HTTP/1.1 200 OK
+	if len(secondPod.Status.PodIPs) > 1 {
+		for _, ip := range secondPod.Status.PodIPs {
+			// only run the check if the pod actually has an ipv6 address
+			if net.ParseIP(ip.IP).To4() == nil {
+				resultThree := podTest(ctx, clientset, secondPod, ipv6PrefixPolicy, rc, webRequestPassedWindows)
+				if resultThree != nil {
+					return resultThree
+				}
+			}
+		}
+	}
+
 	return nil
 }
diff --git a/test/internal/k8sutils/utils.go b/test/internal/k8sutils/utils.go
index 4174595751..7697e43c31 100644
--- a/test/internal/k8sutils/utils.go
+++ b/test/internal/k8sutils/utils.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"flag"
+	"fmt"
 	"io"
 	"log"
 	"os"
@@ -71,7 +72,6 @@ func mustParseResource(path string, out interface{}) error {
 	if err := yaml.NewYAMLOrJSONDecoder(f, 0).Decode(out); err != nil {
 		return err
 	}
-
 	return err
 }
@@ -374,3 +374,8 @@ func NamespaceExists(ctx context.Context, clientset *kubernetes.Clientset, names
 	}
 	return true, nil
 }
+
+// CreateLabelSelector returns a "key=value" label selector string.
+func CreateLabelSelector(key string, selector *string) string {
+	return fmt.Sprintf("%s=%s", key, *selector)
+}
diff --git a/test/internal/k8sutils/utils_get.go b/test/internal/k8sutils/utils_get.go
index 531ec38fce..6c1ff2b0e6 100644
--- a/test/internal/k8sutils/utils_get.go
+++ b/test/internal/k8sutils/utils_get.go
@@ -43,9 +43,11 @@ func GetPodsIpsByNode(ctx context.Context, clientset *kubernetes.Clientset, name
 	if err != nil {
 		return nil, err
 	}
-	ips := make([]string, 0, len(pods.Items))
+	ips := make([]string, 0, len(pods.Items)*2) //nolint
 	for index := range pods.Items {
-		ips = append(ips, pods.Items[index].Status.PodIP)
+		for _, podIP := range pods.Items[index].Status.PodIPs {
+			ips = append(ips, podIP.IP)
+		}
 	}
 	return ips, nil
 }
diff --git a/test/validate/linux_validate.go b/test/validate/linux_validate.go
index d2839f4098..99fe2a0e3e 100644
--- a/test/validate/linux_validate.go
+++ b/test/validate/linux_validate.go
@@ -91,20 +91,20 @@ func (l *LinuxClient) CreateClient(ctx context.Context, clienset *kubernetes.Cli
 
 // Todo: Based on cni version validate different state files
 func (v *LinuxValidator) ValidateStateFile() error {
-	checks := []struct {
-		name             string
-		stateFileIps     func([]byte) (map[string]string, error)
-		podLabelSelector string
-		podNamespace     string
-		cmd              []string
-	}{
+	checkSet := make(map[string][]check) // key is cni type, value is a list of checks
+	// TODO: add cniv1 when adding Linux related test cases
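+	// cilium clusters validate CNS state, the cilium endpoint state, and the
+	// CNS local cache; cniv2 validates only the CNS local cache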
+	checkSet["cilium"] = []check{
 		{"cns", cnsStateFileIps, cnsLabelSelector, privilegedNamespace, cnsStateFileCmd},
 		{"cilium", ciliumStateFileIps, ciliumLabelSelector, privilegedNamespace, ciliumStateFileCmd},
 		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
 	}
 
-	for _, check := range checks {
-		err := v.validate(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
+	checkSet["cniv2"] = []check{
+		{"cns cache", cnsCacheStateFileIps, cnsLabelSelector, privilegedNamespace, cnsLocalCacheCmd},
+	}
+
+	for _, check := range checkSet[v.cni] {
+		err := v.validateIPs(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
 		if err != nil {
 			return err
 		}
@@ -191,7 +191,7 @@ func cnsCacheStateFileIps(result []byte) (map[string]string, error) {
 	return cnsPodIps, nil
 }
 
-func (v *LinuxValidator) validate(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
+func (v *LinuxValidator) validateIPs(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
 	log.Printf("Validating %s state file", checkType)
 	nodes, err := k8sutils.GetNodeList(v.ctx, v.clientset)
 	if err != nil {
diff --git a/test/validate/utils.go b/test/validate/utils.go
index 7180c7bc66..4c81fe145a 100644
--- a/test/validate/utils.go
+++ b/test/validate/utils.go
@@ -2,6 +2,7 @@ package validate
 
 import (
 	"context"
+	"reflect"
 
 	"github.com/Azure/azure-container-networking/test/internal/k8sutils"
 	corev1 "k8s.io/api/core/v1"
@@ -29,11 +30,30 @@ func getPodIPsWithoutNodeIP(ctx context.Context, clientset *kubernetes.Clientset
 	if err != nil {
 		return podsIpsWithoutNodeIP
 	}
-	nodeIP := node.Status.Addresses[0].Address
+	nodeIPs := make([]string, 0)
+	for _, address := range node.Status.Addresses {
+		if address.Type == corev1.NodeInternalIP {
+			nodeIPs = append(nodeIPs, address.Address)
+		}
+	}
+
 	for _, podIP := range podIPs {
-		if podIP != nodeIP {
+		if !contain(podIP, nodeIPs) {
 			podsIpsWithoutNodeIP = append(podsIpsWithoutNodeIP, podIP)
 		}
 	}
 	return podsIpsWithoutNodeIP
 }
+
+// contain reports whether target (a slice or array) contains obj.
+func contain(obj, target interface{}) bool {
+	targetValue := reflect.ValueOf(target)
+	switch reflect.TypeOf(target).Kind() { //nolint
+	case reflect.Slice, reflect.Array:
+		for i := 0; i < targetValue.Len(); i++ {
+			if targetValue.Index(i).Interface() == obj {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/test/validate/windows_validate.go b/test/validate/windows_validate.go
index 9e54f61bef..ab9a0f2aed 100644
--- a/test/validate/windows_validate.go
+++ b/test/validate/windows_validate.go
@@ -18,7 +18,7 @@ const (
 )
 
 var (
-	hnsEndPpointCmd = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"}
+	hnsEndPointCmd   = []string{"powershell", "-c", "Get-HnsEndpoint | ConvertTo-Json"}
 	azureVnetCmd     = []string{"powershell", "-c", "cat ../../k/azure-vnet.json"}
 	azureVnetIpamCmd = []string{"powershell", "-c", "cat ../../k/azure-vnet-ipam.json"}
 )
@@ -78,6 +78,14 @@ type AddressRecord struct {
 	InUse   bool
 }
 
+type check struct {
+	name             string
+	stateFileIps     func([]byte) (map[string]string, error)
+	podLabelSelector string
+	podNamespace     string
+	cmd              []string
+}
+
 func (w *WindowsClient) CreateClient(ctx context.Context, clienset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool) IValidator {
 	// deploy privileged pod
 	privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedWindowsDaemonSetPath)
@@ -106,24 +114,26 @@ func (w *WindowsClient) 
CreateClient(ctx context.Context, clienset *kubernetes.C
 }
 
 func (v *WindowsValidator) ValidateStateFile() error {
-	checks := []struct {
-		name             string
-		stateFileIps     func([]byte) (map[string]string, error)
-		podLabelSelector string
-		podNamespace     string
-		cmd              []string
-	}{
-		{"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPpointCmd},
+	checkSet := make(map[string][]check) // key is cni type, value is a list of checks
+
+	checkSet["cniv1"] = []check{
+		{"hns", hnsStateFileIps, privilegedLabelSelector, privilegedNamespace, hnsEndPointCmd},
 		{"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd},
 		{"azure-vnet-ipam", azureVnetIpamIps, privilegedLabelSelector, privilegedNamespace, azureVnetIpamCmd},
 	}
 
-	for _, check := range checks {
-		err := v.validate(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
+	checkSet["cniv2"] = []check{
+		{"azure-vnet", azureVnetIps, privilegedLabelSelector, privilegedNamespace, azureVnetCmd},
+	}
+
+	// check all pod IPs against the state file for the configured cni type
+	for _, check := range checkSet[v.cni] {
+		err := v.validateIPs(check.stateFileIps, check.cmd, check.name, check.podNamespace, check.podLabelSelector)
 		if err != nil {
 			return err
 		}
 	}
+
 	return nil
 }
 
@@ -184,7 +194,7 @@ func azureVnetIpamIps(result []byte) (map[string]string, error) {
 	return azureVnetIpamPodIps, nil
 }
 
-func (v *WindowsValidator) validate(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
+func (v *WindowsValidator) validateIPs(stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
 	log.Println("Validating ", checkType, " state file")
 	nodes, err := k8sutils.GetNodeListByLabelSelector(v.ctx, v.clientset, windowsNodeSelector)
 	if err != nil {