From 790cfdc3fe486d034be5aec1ceb06d3283408802 Mon Sep 17 00:00:00 2001 From: Hongliang Liu Date: Sun, 5 Dec 2021 16:15:28 +0800 Subject: [PATCH] Flexible pipeline #6 Test Signed-off-by: Hongliang Liu --- pkg/agent/openflow/client_test.go | 44 +++- test/e2e/basic_test.go | 5 +- test/e2e/framework.go | 5 + test/e2e/performance_test.go | 12 +- test/e2e/proxy_test.go | 293 ++++++++++++++++++------ test/e2e/traceflow_test.go | 71 +++--- test/integration/agent/openflow_test.go | 252 +++++++++++--------- test/integration/ovs/ofctrl_test.go | 69 +++--- 8 files changed, 486 insertions(+), 265 deletions(-) diff --git a/pkg/agent/openflow/client_test.go b/pkg/agent/openflow/client_test.go index bd7b5f91176..116bbe6586a 100644 --- a/pkg/agent/openflow/client_test.go +++ b/pkg/agent/openflow/client_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - "antrea.io/ofnet/ofctrl" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -64,7 +63,7 @@ func installNodeFlows(ofClient Client, cacheKey string) (int, error) { } err := ofClient.InstallNodeFlows(hostName, peerConfigs, &utilip.DualStackIPs{IPv4: peerNodeIP}, 0, nil) client := ofClient.(*client) - fCacheI, ok := client.nodeFlowCache.Load(hostName) + fCacheI, ok := client.featurePodConnectivity.nodeFlowCache.Load(hostName) if ok { return len(fCacheI.(flowCache)), err } @@ -78,7 +77,7 @@ func installPodFlows(ofClient Client, cacheKey string) (int, error) { ofPort := uint32(10) err := ofClient.InstallPodFlows(containerID, []net.IP{podIP}, podMAC, ofPort) client := ofClient.(*client) - fCacheI, ok := client.podFlowCache.Load(containerID) + fCacheI, ok := client.featurePodConnectivity.podFlowCache.Load(containerID) if ok { return len(fCacheI.(flowCache)), err } @@ -109,6 +108,10 @@ func TestIdempotentFlowInstallation(t *testing.T) { client.ofEntryOperations = m client.nodeConfig = nodeConfig client.networkConfig = networkConfig + templatesList := client.initializeFeatures(true) + for _, templates := range templatesList { + generatePipeline(templates) + } m.EXPECT().AddAll(gomock.Any()).Return(nil).Times(1) // Installing the flows should succeed, and all the flows should be added into the cache. @@ -138,6 +141,10 @@ func TestIdempotentFlowInstallation(t *testing.T) { client.ofEntryOperations = m client.nodeConfig = nodeConfig client.networkConfig = networkConfig + templatesList := client.initializeFeatures(true) + for _, templates := range templatesList { + generatePipeline(templates) + } errorCall := m.EXPECT().AddAll(gomock.Any()).Return(errors.New("Bundle error")).Times(1) m.EXPECT().AddAll(gomock.Any()).Return(nil).After(errorCall) @@ -180,6 +187,10 @@ func TestFlowInstallationFailed(t *testing.T) { client.ofEntryOperations = m client.nodeConfig = nodeConfig client.networkConfig = networkConfig + templatesList := client.initializeFeatures(true) + for _, templates := range templatesList { + generatePipeline(templates) + } // We generate an error for AddAll call. 
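Each of the unit tests above now repeats the same feature bootstrap (initializeFeatures plus a generatePipeline loop) before installing flows. A minimal sketch of a shared helper, assuming the client_test.go package scope; the helper name preparePipelines is hypothetical, while initializeFeatures and generatePipeline are the functions this patch introduces:

func preparePipelines(c *client) {
	// The same bootstrap each test currently performs inline: build the
	// per-feature templates, then generate the pipeline from them.
	templatesList := c.initializeFeatures(true)
	for _, templates := range templatesList {
		generatePipeline(templates)
	}
}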
m.EXPECT().AddAll(gomock.Any()).Return(errors.New("Bundle error")) @@ -215,6 +226,10 @@ func TestConcurrentFlowInstallation(t *testing.T) { client.ofEntryOperations = m client.nodeConfig = nodeConfig client.networkConfig = networkConfig + templatesList := client.initializeFeatures(true) + for _, templates := range templatesList { + generatePipeline(templates) + } var concurrentCalls atomic.Value // set to true if we observe concurrent calls timeoutCh := make(chan struct{}) @@ -258,9 +273,6 @@ func TestConcurrentFlowInstallation(t *testing.T) { } func Test_client_InstallTraceflowFlows(t *testing.T) { - type ofSwitch struct { - ofctrl.OFSwitch - } type fields struct { } type args struct { @@ -404,16 +416,24 @@ func prepareTraceflowFlow(ctrl *gomock.Controller) *client { c := ofClient.(*client) c.cookieAllocator = cookie.NewAllocator(0) c.nodeConfig = nodeConfig - m := ovsoftest.NewMockBridge(ctrl) - m.EXPECT().AddFlowsInBundle(gomock.Any(), nil, nil).Return(nil).Times(1) - c.bridge = m + m := oftest.NewMockOFEntryOperations(ctrl) + c.ofEntryOperations = m + c.nodeConfig = nodeConfig + c.networkConfig = networkConfig + templatesList := c.initializeFeatures(true) + for _, templates := range templatesList { + generatePipeline(templates) + } + + m.EXPECT().AddAll(gomock.Any()).Return(nil).Times(1) + c.bridge = ovsoftest.NewMockBridge(ctrl) mFlow := ovsoftest.NewMockFlow(ctrl) ctx := &conjMatchFlowContext{dropFlow: mFlow} mFlow.EXPECT().FlowProtocol().Return(binding.Protocol("ip")) - mFlow.EXPECT().CopyToBuilder(priorityNormal+2, false).Return(EgressDefaultTable.BuildFlow(priorityNormal + 2)).Times(1) - c.globalConjMatchFlowCache["mockContext"] = ctx - c.policyCache.Add(&policyRuleConjunction{metricFlows: []binding.Flow{c.denyRuleMetricFlow(123, false)}}) + mFlow.EXPECT().CopyToBuilder(priorityNormal+2, false).Return(EgressDefaultTable.ofTable.BuildFlow(priorityNormal + 2)).Times(1) + c.featureNetworkPolicy.globalConjMatchFlowCache["mockContext"] = ctx + c.featureNetworkPolicy.policyCache.Add(&policyRuleConjunction{metricFlows: []binding.Flow{c.featureNetworkPolicy.denyRuleMetricFlow(123, false)}}) return c } diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index 6d78132e5b4..5b9be413534 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -399,6 +399,9 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo continue } route := Route{} + if net.ParseIP(matches[1]) != nil { + matches[1] = fmt.Sprintf("%s/32", matches[1]) + } if _, route.peerPodCIDR, err = net.ParseCIDR(matches[1]); err != nil { return nil, fmt.Errorf("%s is not a valid net CIDR", matches[1]) } @@ -762,7 +765,7 @@ func testGratuitousARP(t *testing.T, data *TestData, namespace string) { // be sent 100ms after processing CNI ADD request. 
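A note on the route-parsing fix in testReconcileGatewayRoutesOnStartup above: net.ParseCIDR rejects a bare address, so a host route printed without a prefix length has to be normalized before parsing. A standalone sketch of the same logic (the address is an example; as in the patch, appending /32 assumes an IPv4 route, and an IPv6 host route would need /128):

// If the string parses as a plain IP, turn it into a /32 so ParseCIDR accepts it.
addr := "10.10.0.1"
if net.ParseIP(addr) != nil {
	addr = fmt.Sprintf("%s/32", addr)
}
_, cidr, err := net.ParseCIDR(addr) // cidr is 10.10.0.1/32, err is nil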
time.Sleep(100 * time.Millisecond) - cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=10,arp,arp_spa=%s", podIP.ipv4.String())} + cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=ARPSpoofGuard,arp,arp_spa=%s", podIP.ipv4.String())} stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("Error when querying openflow: %v", err) } diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 35f4982569c..8459f4d9f61 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -1494,6 +1494,11 @@ func (data *TestData) createNginxClusterIPService(name, namespace string, affini return data.createService(name, namespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) } +// createAgnhostClusterIPService creates a ClusterIP agnhost service with the given name. +func (data *TestData) createAgnhostClusterIPService(serviceName string, affinity bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { + return data.createService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) +} + // createAgnhostNodePortService creates a NodePort agnhost service with the given name. func (data *TestData) createAgnhostNodePortService(serviceName string, affinity, nodeLocalExternal bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { return data.createService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) diff --git a/test/e2e/performance_test.go b/test/e2e/performance_test.go index f2f1033cc0c..c09c8418ebf 100644 --- a/test/e2e/performance_test.go +++ b/test/e2e/performance_test.go @@ -28,6 +28,8 @@ import ( networkv1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" + + "antrea.io/antrea/pkg/agent/openflow" ) const ( @@ -283,17 +285,17 @@ func WaitNetworkPolicyRealize(policyRules int, data *TestData) error { // checkRealize checks if all CIDR rules in the Network Policy have been realized as OVS flows. It counts the number of // flows installed in the ingressRuleTable of the OVS bridge of the control-plane Node. This relies on the implementation -// knowledge that given a single ingress policy, the Antrea agent will install exactly one flow per CIDR rule in table 90. -// checkRealize returns true when the number of flows exceeds the number of CIDR, because each table has a default flow -// entry which is used for default matching. +// knowledge that given a single ingress policy, the Antrea agent will install exactly one flow per CIDR rule in table +// IngressRule. checkRealize returns true when the number of flows exceeds the number of CIDRs, because each table has a +// default flow entry which is used for default matching. // Since the check is done over SSH, the time measurement is not completely accurate. func checkRealize(policyRules int, data *TestData) (bool, error) { antreaPodName, err := data.getAntreaPodOnNode(controlPlaneNodeName()) if err != nil { return false, err } - // table 90 is the ingressRuleTable where the rules in workload network policy is being applied to. - cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=90"} + // table IngressRule is the ingressRuleTable where the rules in the workload network policy are applied.
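Table references in the test commands are now symbolic instead of numeric, which is what the hunk below relies on. A short illustration of the two accessors this patch uses (GetName here, GetID in the integration tests later; the numeric value in the comment is illustrative, since IDs are assigned when the pipeline is generated):

filterByName := fmt.Sprintf("table=%s", openflow.IngressRuleTable.GetName()) // "table=IngressRule"
filterByID := fmt.Sprintf("table=%d", openflow.IngressRuleTable.GetID())     // e.g. 90 in the old fixed layout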
+ cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", openflow.IngressRuleTable.GetName())} stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) if err != nil { return false, err diff --git a/test/e2e/proxy_test.go b/test/e2e/proxy_test.go index 3e0c9567573..0a1f5665e56 100644 --- a/test/e2e/proxy_test.go +++ b/test/e2e/proxy_test.go @@ -27,12 +27,13 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "antrea.io/antrea/pkg/agent/config" "antrea.io/antrea/pkg/features" ) type expectTableFlows struct { - tableID int - flows []string + tableName string + flows []string } // TestProxy is the top-level test which contains all subtests for @@ -51,9 +52,6 @@ func TestProxy(t *testing.T) { skipIfProviderIs(t, "kind", "#881 Does not work in Kind, needs to be investigated.") testProxyServiceSessionAffinityCase(t, data) }) - t.Run("testProxyHairpinCase", func(t *testing.T) { - testProxyHairpinCase(t, data) - }) t.Run("testProxyEndpointLifeCycleCase", func(t *testing.T) { testProxyEndpointLifeCycleCase(t, data) }) @@ -115,20 +113,20 @@ func probeClientIPFromNode(node string, baseUrl string) (string, error) { return host, err } -func probeFromPod(data *TestData, pod string, url string) error { - _, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1"}) +func probeFromPod(data *TestData, pod, container string, url string) error { + _, _, err := data.runCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "1"}) return err } -func probeHostnameFromPod(data *TestData, pod string, baseUrl string) (string, error) { +func probeHostnameFromPod(data *TestData, pod, container string, baseUrl string) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "hostname") - hostname, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1"}) + hostname, _, err := data.runCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "1"}) return hostname, err } -func probeClientIPFromPod(data *TestData, pod string, baseUrl string) (string, error) { +func probeClientIPFromPod(data *TestData, pod, container string, baseUrl string) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "clientip") - hostPort, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1"}) + hostPort, _, err := data.runCommandFromPod(testNamespace, pod, container, []string{"wget", "-O", "-", url, "-T", "1"}) if err != nil { return "", err } @@ -234,7 +232,7 @@ func testLoadBalancerClusterFromNode(t *testing.T, data *TestData, nodes []strin func testLoadBalancerClusterFromPod(t *testing.T, data *TestData, pods []string, url string) { for _, pod := range pods { - require.NoError(t, probeFromPod(data, pod, url), "Service LoadBalancer whose externalTrafficPolicy is Cluster should be able to be connected from Pod") + require.NoError(t, probeFromPod(data, pod, busyboxContainerName, url), "Service LoadBalancer whose externalTrafficPolicy is Cluster should be able to be connected from Pod") } } @@ -250,11 +248,11 @@ func testLoadBalancerLocalFromNode(t *testing.T, data *TestData, nodes []string, func testLoadBalancerLocalFromPod(t *testing.T, data *TestData, pods []string, url string, expectedClientIPs, expectedHostnames []string) { errMsg := "Service NodePort whose externalTrafficPolicy is Local should be 
able to be connected from Pod" for idx, pod := range pods { - hostname, err := probeHostnameFromPod(data, pod, url) + hostname, err := probeHostnameFromPod(data, pod, busyboxContainerName, url) require.NoError(t, err, errMsg) require.Equal(t, hostname, expectedHostnames[idx]) - clientIP, err := probeClientIPFromPod(data, pod, url) + clientIP, err := probeClientIPFromPod(data, pod, busyboxContainerName, url) require.NoError(t, err, errMsg) require.Equal(t, clientIP, expectedClientIPs[idx]) } @@ -413,7 +411,7 @@ func testNodePortClusterFromNode(t *testing.T, data *TestData, nodes, urls []str func testNodePortClusterFromPod(t *testing.T, data *TestData, pods, urls []string) { for _, url := range urls { for _, pod := range pods { - require.NoError(t, probeFromPod(data, pod, url), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from Pod") + require.NoError(t, probeFromPod(data, pod, busyboxContainerName, url), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from Pod") } } } @@ -444,11 +442,11 @@ func testNodePortLocalFromNode(t *testing.T, data *TestData, nodes, urls, expect func testNodePortLocalFromPod(t *testing.T, data *TestData, pods, urls, expectedClientIPs, expectedHostnames []string) { errMsg := "There should be no errors when accessing to Service NodePort whose externalTrafficPolicy is Local from Pod" for idx, pod := range pods { - hostname, err := probeHostnameFromPod(data, pod, urls[idx]) + hostname, err := probeHostnameFromPod(data, pod, busyboxContainerName, urls[idx]) require.NoError(t, err, errMsg) require.Equal(t, hostname, expectedHostnames[idx]) - clientIP, err := probeClientIPFromPod(data, pod, urls[idx]) + clientIP, err := probeClientIPFromPod(data, pod, busyboxContainerName, urls[idx]) require.NoError(t, err, errMsg) require.Equal(t, clientIP, expectedClientIPs[idx]) } @@ -506,69 +504,228 @@ func testProxyServiceSessionAffinity(ipFamily *corev1.IPFamily, ingressIPs []str agentName, err := data.getAntreaPodOnNode(nodeName) require.NoError(t, err) - table40Output, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=40"}) + tableSessionAffinityName := "SessionAffinity" + tableSessionAffinityOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", tableSessionAffinityName)}) require.NoError(t, err) if *ipFamily == corev1.IPv4Protocol { - require.Contains(t, table40Output, fmt.Sprintf("nw_dst=%s,tp_dst=80", svc.Spec.ClusterIP)) - require.Contains(t, table40Output, fmt.Sprintf("load:0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIP.ipv4.To4()), "0"))) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("nw_dst=%s,tp_dst=80", svc.Spec.ClusterIP)) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_REG3[]", strings.TrimLeft(hex.EncodeToString(nginxIP.ipv4.To4()), "0"))) for _, ingressIP := range ingressIPs { - require.Contains(t, table40Output, fmt.Sprintf("nw_dst=%s,tp_dst=80", ingressIP)) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("nw_dst=%s,tp_dst=80", ingressIP)) } } else { - require.Contains(t, table40Output, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", svc.Spec.ClusterIP)) - require.Contains(t, table40Output, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[8:16]), "0"))) - 
require.Contains(t, table40Output, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[0:8]), "0"))) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", svc.Spec.ClusterIP)) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[0..63]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[8:16]), "0"))) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("load:0x%s->NXM_NX_XXREG3[64..127]", strings.TrimLeft(hex.EncodeToString([]byte(*nginxIP.ipv6)[0:8]), "0"))) for _, ingressIP := range ingressIPs { - require.Contains(t, table40Output, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", ingressIP)) + require.Contains(t, tableSessionAffinityOutput, fmt.Sprintf("ipv6_dst=%s,tp_dst=80", ingressIP)) } } } -func testProxyHairpinCase(t *testing.T, data *TestData) { - if len(clusterInfo.podV4NetworkCIDR) != 0 { - ipFamily := corev1.IPv4Protocol - testProxyHairpin(&ipFamily, data, t) - } - if len(clusterInfo.podV6NetworkCIDR) != 0 { - ipFamily := corev1.IPv6Protocol - testProxyHairpin(&ipFamily, data, t) - } + +func TestProxyHairpinIPv4(t *testing.T) { + skipIfProxyDisabled(t) + skipIfNotIPv4Cluster(t) + testProxyHairpin(t, false) } -func TestProxyHairpin(t *testing.T) { - skipIfHasWindowsNodes(t) +func TestProxyHairpinIPv6(t *testing.T) { skipIfProxyDisabled(t) + skipIfNotIPv6Cluster(t) + testProxyHairpin(t, true) +} + +func testProxyHairpin(t *testing.T, isIPv6 bool) { data, err := setupTest(t) if err != nil { t.Fatalf("Error when setting up test: %v", err) } defer teardownTest(t, data) - if len(clusterInfo.podV4NetworkCIDR) != 0 { - ipFamily := corev1.IPv4Protocol - testProxyHairpin(&ipFamily, data, t) + node := nodeName(1) + workerNodeIP := workerNodeIPv4(1) + controllerNodeIP := controlPlaneNodeIPv4() + ipProtocol := corev1.IPv4Protocol + lbClusterIngressIP := []string{"192.168.240.1"} + lbLocalIngressIP := []string{"192.168.240.2"} + if isIPv6 { + workerNodeIP = workerNodeIPv6(1) + controllerNodeIP = controlPlaneNodeIPv6() + ipProtocol = corev1.IPv6Protocol + lbClusterIngressIP = []string{"fd75::aabb:ccdd:ef00"} + lbLocalIngressIP = []string{"fd75::aabb:ccdd:ef01"} } - if len(clusterInfo.podV6NetworkCIDR) != 0 { - ipFamily := corev1.IPv6Protocol - testProxyHairpin(&ipFamily, data, t) + + // Create a ClusterIP Service. + serviceClusterIP := fmt.Sprintf("clusterip-%v", isIPv6) + clusterIPSvc, err := data.createAgnhostClusterIPService(serviceClusterIP, true, &ipProtocol) + defer data.deleteServiceAndWait(defaultTimeout, serviceClusterIP) + require.NoError(t, err) + + // Create two NodePort Services. The externalTrafficPolicy of one Service is Cluster, and the externalTrafficPolicy + // of another one is Local. 
+ var nodePortCluster, nodePortLocal string + serviceNodePortCluster := fmt.Sprintf("nodeport-cluster-%v", isIPv6) + serviceNodePortLocal := fmt.Sprintf("nodeport-local-%v", isIPv6) + nodePortSvc, err := data.createAgnhostNodePortService(serviceNodePortCluster, true, false, &ipProtocol) + defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortCluster) + require.NoError(t, err) + for _, port := range nodePortSvc.Spec.Ports { + if port.NodePort != 0 { + nodePortCluster = fmt.Sprint(port.NodePort) + break + } } require.NotEqual(t, "", nodePortCluster, "NodePort port number should not be empty") + nodePortSvc, err = data.createAgnhostNodePortService(serviceNodePortLocal, true, true, &ipProtocol) + require.NoError(t, err) + defer data.deleteServiceAndWait(defaultTimeout, serviceNodePortLocal) + for _, port := range nodePortSvc.Spec.Ports { + if port.NodePort != 0 { + nodePortLocal = fmt.Sprint(port.NodePort) + break + } + } + require.NotEqual(t, "", nodePortLocal, "NodePort port number should not be empty") -func testProxyHairpin(ipFamily *corev1.IPFamily, data *TestData, t *testing.T) { - busybox := randName("busybox-") - nodeName := nodeName(1) - err := data.createPodOnNode(busybox, testNamespace, nodeName, busyboxImage, []string{"nc", "-lk", "-p", "80"}, nil, nil, []corev1.ContainerPort{{ContainerPort: 80, Protocol: corev1.ProtocolTCP}}, false, nil) - defer data.deletePodAndWait(defaultTimeout, busybox, testNamespace) + // Create two LoadBalancer Services. The externalTrafficPolicy of one Service is Cluster, and the externalTrafficPolicy + // of another one is Local. + serviceLBCluster := fmt.Sprintf("lb-cluster-%v", isIPv6) + serviceLBLocal := fmt.Sprintf("lb-local-%v", isIPv6) + _, err = data.createAgnhostLoadBalancerService(serviceLBCluster, true, false, lbClusterIngressIP, &ipProtocol) require.NoError(t, err) - require.NoError(t, data.podWaitForRunning(defaultTimeout, busybox, testNamespace)) - svc, err := data.createService(busybox, testNamespace, 80, 80, map[string]string{"antrea-e2e": busybox}, false, false, corev1.ServiceTypeClusterIP, ipFamily) - defer data.deleteServiceAndWait(defaultTimeout, busybox) + _, err = data.createAgnhostLoadBalancerService(serviceLBLocal, true, true, lbLocalIngressIP, &ipProtocol) require.NoError(t, err) - // Hold on to make sure that the Service is realized. - time.Sleep(3 * time.Second) + // These are the test URLs. + port := "8080" + clusterIPUrl := net.JoinHostPort(clusterIPSvc.Spec.ClusterIP, port) + workerNodePortClusterUrl := net.JoinHostPort(workerNodeIP, nodePortCluster) + workerNodePortLocalUrl := net.JoinHostPort(workerNodeIP, nodePortLocal) + controllerNodePortClusterUrl := net.JoinHostPort(controllerNodeIP, nodePortCluster) + lbClusterUrl := net.JoinHostPort(lbClusterIngressIP[0], port) + lbLocalUrl := net.JoinHostPort(lbLocalIngressIP[0], port) + + // These are the expected client IPs.
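The URLs above are built with net.JoinHostPort rather than plain concatenation because it brackets IPv6 addresses correctly; a quick illustration using the ingress IPs from this test:

u4 := net.JoinHostPort("192.168.240.1", "8080")        // "192.168.240.1:8080"
u6 := net.JoinHostPort("fd75::aabb:ccdd:ef00", "8080") // "[fd75::aabb:ccdd:ef00]:8080"

As for the expected client IPs that follow: nodeGatewayIPs returns the IPv4 gateway address first and the IPv6 one second, and config.VirtualServiceIPv4/IPv6 are the reserved virtual SNAT addresses used when the Endpoint is on the host network, as the hairpin comments further below explain.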
+ expectedGatewayIP, _ := nodeGatewayIPs(1) + expectedVirtualIP := config.VirtualServiceIPv4.String() + expectedControllerIP := controllerNodeIP + if isIPv6 { + _, expectedGatewayIP = nodeGatewayIPs(1) + expectedVirtualIP = config.VirtualServiceIPv6.String() + } - stdout, stderr, err := data.runCommandFromPod(testNamespace, busybox, busyboxContainerName, []string{"nc", svc.Spec.ClusterIP, "80", "-w", "1", "-e", "ls", "/"}) - require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", *ipFamily, stdout, stderr)) + agnhost := fmt.Sprintf("agnhost-%v", isIPv6) + createAgnhostPod(t, data, agnhost, node, false) + t.Run("Non-HostNetwork Endpoints", func(t *testing.T) { + testProxyIntraNodeHairpinCases(data, t, expectedGatewayIP, agnhost, clusterIPUrl, workerNodePortClusterUrl, workerNodePortLocalUrl, lbClusterUrl, lbLocalUrl) + testProxyInterNodeHairpinCases(data, t, false, expectedControllerIP, nodeName(0), clusterIPUrl, controllerNodePortClusterUrl, lbClusterUrl) + }) + require.NoError(t, data.deletePod(testNamespace, agnhost)) + + agnhostHost := fmt.Sprintf("agnhost-host-%v", isIPv6) + createAgnhostPod(t, data, agnhostHost, node, true) + t.Run("HostNetwork Endpoints", func(t *testing.T) { + skipIfProxyAllDisabled(t, data) + testProxyIntraNodeHairpinCases(data, t, expectedVirtualIP, agnhostHost, clusterIPUrl, workerNodePortClusterUrl, workerNodePortLocalUrl, lbClusterUrl, lbLocalUrl) + testProxyInterNodeHairpinCases(data, t, true, expectedControllerIP, nodeName(0), clusterIPUrl, controllerNodePortClusterUrl, lbClusterUrl) + }) +} + +// If a Pod is not on host network, when it accesses a ClusterIP/NodePort/LoadBalancer Service whose Endpoint is on itself, +// that means a hairpin connection. Antrea gateway IP is used to SNAT the connection. The IP changes of the connection are: +// - Pod : Pod IP -> Service IP +// - OVS DNAT: Pod IP -> Pod IP +// - OVS SNAT: Antrea gateway IP -> Pod IP +// - Pod : Antrea gateway IP -> Pod IP +// +// If a Pod is on host network, when it accesses a ClusterIP/NodePort/LoadBalancer Service whose Endpoint is on itself +// (this is equivalent to that a Node accesses a Cluster/NodePort/LoadBalancer whose Endpoint is host network and the +// Endpoint is on this Node), that means a hairpin connection. A virtual IP is used to SNAT the connection to ensure +// that the packet can be routed via Antrea gateway. 
The IP changes of the connection are: +// - Antrea gateway: Antrea gateway IP -> Service IP +// - OVS DNAT: Antrea gateway IP -> Node IP +// - OVS SNAT: virtual IP -> Node IP +// - Antrea gateway: virtual IP -> Node IP +func testProxyIntraNodeHairpinCases(data *TestData, t *testing.T, expectedClientIP, pod, clusterIPUrl, nodePortClusterUrl, nodePortLocalUrl, lbClusterUrl, lbLocalUrl string) { + t.Run("IntraNode/ClusterIP", func(t *testing.T) { + clientIP, err := probeClientIPFromPod(data, pod, agnhostContainerName, clusterIPUrl) + require.NoError(t, err, "ClusterIP hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("IntraNode/NodePort/ExternalTrafficPolicy:Cluster", func(t *testing.T) { + skipIfProxyAllDisabled(t, data) + clientIP, err := probeClientIPFromPod(data, pod, agnhostContainerName, nodePortClusterUrl) + require.NoError(t, err, "NodePort whose externalTrafficPolicy is Cluster hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("IntraNode/NodePort/ExternalTrafficPolicy:Local", func(t *testing.T) { + skipIfProxyAllDisabled(t, data) + clientIP, err := probeClientIPFromPod(data, pod, agnhostContainerName, nodePortLocalUrl) + require.NoError(t, err, "NodePort whose externalTrafficPolicy is Local hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("IntraNode/LoadBalancer/ExternalTrafficPolicy:Cluster", func(t *testing.T) { + clientIP, err := probeClientIPFromPod(data, pod, agnhostContainerName, lbClusterUrl) + require.NoError(t, err, "LoadBalancer whose externalTrafficPolicy is Cluster hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("IntraNode/LoadBalancer/ExternalTrafficPolicy:Local", func(t *testing.T) { + clientIP, err := probeClientIPFromPod(data, pod, agnhostContainerName, lbLocalUrl) + require.NoError(t, err, "LoadBalancer whose externalTrafficPolicy is Local hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) +} + +// If client is Node A, when it accesses a ClusterIP/NodePort/LoadBalancer Service whose Endpoint is on Node B, below +// cases are hairpin (assumed that feature AntreaIPAM is not enabled): +// - Traffic mode: encap, Endpoint network: host network, OS: Linux/Windows +// - Traffic mode: noEncap, Endpoint network: not host network, OS: Linux (packets are routed via uplink interface) +// - Traffic mode: noEncap, Endpoint network: host network, OS: Linux/Windows +// The IP changes of the hairpin connections are: +// - Node A Antrea gateway: Antrea gateway IP -> Service IP +// - OVS DNAT: Antrea gateway IP -> Endpoint IP +// - OVS SNAT: virtual IP -> Endpoint IP +// - Node A Antrea gateway: virtual IP -> Endpoint IP +// - Node A output: Node A IP -> Endpoint IP (another SNAT for virtual IP, otherwise reply packets can't be routed back). 
+// - Node B: Node A IP -> Endpoint IP +func testProxyInterNodeHairpinCases(data *TestData, t *testing.T, hostNetwork bool, expectedClientIP, node, clusterIPUrl, nodePortClusterUrl, lbClusterUrl string) { + skipIfAntreaIPAMTest(t) + currentEncapMode, err := data.GetEncapMode() + if err != nil { + t.Fatalf("Failed to get encap mode: %v", err) + } + if !hostNetwork { + if testOptions.providerName == "kind" && (currentEncapMode == config.TrafficEncapModeEncap || currentEncapMode == config.TrafficEncapModeHybrid) { + t.Skipf("Skipping test because inter-Node Pod traffic is encapsulated when testbed is Kind and traffic mode is encap/hybrid") + } else if currentEncapMode == config.TrafficEncapModeEncap { + t.Skipf("Skipping test because inter-Node Pod traffic is encapsulated when testbed is not Kind and traffic mode encap") + } + } + + t.Run("InterNode/ClusterIP", func(t *testing.T) { + clientIP, err := probeClientIPFromNode(node, clusterIPUrl) + require.NoError(t, err, "ClusterIP hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("InterNode/NodePort/ExternalTrafficPolicy:Cluster", func(t *testing.T) { + skipIfProxyAllDisabled(t, data) + if !hostNetwork && currentEncapMode == config.TrafficEncapModeNoEncap { + skipIfHasWindowsNodes(t) + } + clientIP, err := probeClientIPFromNode(node, nodePortClusterUrl) + require.NoError(t, err, "NodePort whose externalTrafficPolicy is Cluster hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) + t.Run("InterNode/LoadBalancer/ExternalTrafficPolicy:Cluster", func(t *testing.T) { + skipIfProxyAllDisabled(t, data) + if !hostNetwork && currentEncapMode == config.TrafficEncapModeNoEncap { + skipIfHasWindowsNodes(t) + } + clientIP, err := probeClientIPFromNode(node, lbClusterUrl) + require.NoError(t, err, "LoadBalancer whose externalTrafficPolicy is Cluster hairpin should be able to be connected") + require.Equal(t, expectedClientIP, clientIP) + }) } func testProxyEndpointLifeCycleCase(t *testing.T, data *TestData) { @@ -623,8 +780,8 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te nginxIP = nginxIPs.ipv4.String() } - keywords := make(map[int]string) - keywords[42] = fmt.Sprintf("nat(dst=%s)", net.JoinHostPort(nginxIP, "80")) // endpointNATTable + keywords := make(map[string]string) + keywords["EndpointDNAT"] = fmt.Sprintf("nat(dst=%s)", net.JoinHostPort(nginxIP, "80")) // endpointNATTable var groupKeywords []string if *ipFamily == corev1.IPv6Protocol { @@ -633,8 +790,8 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te groupKeywords = append(groupKeywords, fmt.Sprintf("0x%s->NXM_NX_REG3[]", strings.TrimPrefix(hex.EncodeToString(nginxIPs.ipv4.To4()), "0"))) } - for tableID, keyword := range keywords { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", tableID)}) + for tableName, keyword := range keywords { + tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", tableName)}) require.NoError(t, err) require.Contains(t, tableOutput, keyword) } @@ -650,8 +807,8 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te // Wait for one second to make sure the pipeline to be updated. 
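The fixed one-second sleep below (like the other sleeps in this file) can be flaky on slow testbeds. A polling variant built on the k8s.io/apimachinery/pkg/util/wait package already used elsewhere in this suite would look roughly like this; a sketch only, reusing the surrounding test's data, agentName and keyword variables:

err := wait.PollImmediate(200*time.Millisecond, 5*time.Second, func() (bool, error) {
	tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent",
		[]string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=EndpointDNAT"})
	if err != nil {
		return false, err
	}
	// Done once the Endpoint's DNAT entry has disappeared from the table.
	return !strings.Contains(tableOutput, keyword), nil
})
require.NoError(t, err)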
time.Sleep(time.Second) - for tableID, keyword := range keywords { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", tableID)}) + for tableName, keyword := range keywords { + tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", tableName)}) require.NoError(t, err) require.NotContains(t, tableOutput, keyword) } @@ -732,18 +889,18 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d } } - table42Format := "nat(dst=%s:80)" + tableEndpointDNATFlowFormat := "nat(dst=%s:80)" if *ipFamily == corev1.IPv6Protocol { - table42Format = "nat(dst=[%s]:80)" + tableEndpointDNATFlowFormat = "nat(dst=[%s]:80)" } expectedFlows := []expectTableFlows{ { - 41, // serviceLBTable + "ServiceLB", // serviceLBTable svcLBflows, }, { - 42, - []string{fmt.Sprintf(table42Format, nginxIP)}, // endpointNATTable + "EndpointDNAT", + []string{fmt.Sprintf(tableEndpointDNATFlowFormat, nginxIP)}, // endpointNATTable }, } @@ -757,7 +914,7 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d require.NoError(t, err) require.Contains(t, groupOutput, groupKeyword) for _, expectedTable := range expectedFlows { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) + tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", expectedTable.tableName)}) require.NoError(t, err) for _, expectedFlow := range expectedTable.flows { require.Contains(t, tableOutput, expectedFlow) @@ -774,7 +931,7 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d require.NoError(t, err) require.NotContains(t, groupOutput, groupKeyword) for _, expectedTable := range expectedFlows { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) + tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%s", expectedTable.tableName)}) require.NoError(t, err) for _, expectedFlow := range expectedTable.flows { require.NotContains(t, tableOutput, expectedFlow) diff --git a/test/e2e/traceflow_test.go b/test/e2e/traceflow_test.go index f7037582829..76309594548 100644 --- a/test/e2e/traceflow_test.go +++ b/test/e2e/traceflow_test.go @@ -438,7 +438,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -488,7 +488,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -533,7 +533,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -638,7 
+638,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -802,7 +802,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -852,7 +852,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -897,7 +897,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -970,7 +970,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1018,7 +1018,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1066,7 +1066,7 @@ func testTraceflowIntraNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1102,6 +1102,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { node1Pods, _, node1CleanupFn := createTestBusyboxPods(t, data, 1, testNamespace, node1) node2Pods, node2IPs, node2CleanupFn := createTestBusyboxPods(t, data, 2, testNamespace, node2) + gatewayIPv4, gatewayIPv6 := nodeGatewayIPs(1) defer node1CleanupFn() defer node2CleanupFn() var dstPodIPv4Str, dstPodIPv6Str string @@ -1214,7 +1215,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1228,7 +1229,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1278,7 +1279,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1292,7 +1293,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1337,7 +1338,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1351,7 +1352,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1409,7 +1410,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: 
"L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1423,7 +1424,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1471,7 +1472,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { { Component: v1alpha1.ComponentLB, Pod: fmt.Sprintf("%s/%s", testNamespace, nginxPodName), - TranslatedSrcIP: "169.254.169.252", + TranslatedSrcIP: gatewayIPv4, TranslatedDstIP: nginxIPv4Str, Action: v1alpha1.ActionForwarded, }, @@ -1482,7 +1483,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1525,7 +1526,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1539,7 +1540,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1591,7 +1592,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1605,7 +1606,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1656,7 +1657,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1670,7 +1671,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1715,7 +1716,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1729,7 +1730,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1787,7 +1788,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1801,7 +1802,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1849,7 +1850,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { { Component: v1alpha1.ComponentLB, Pod: fmt.Sprintf("%s/%s", testNamespace, nginxPodName), - TranslatedSrcIP: "fc00::aabb:ccdd:eeff", + TranslatedSrcIP: gatewayIPv6, TranslatedDstIP: nginxIPv6Str, Action: v1alpha1.ActionForwarded, }, @@ -1860,7 +1861,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { 
Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1908,7 +1909,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwarded, }, }, @@ -1922,7 +1923,7 @@ func testTraceflowInterNode(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionDelivered, }, }, @@ -1981,7 +1982,7 @@ func testTraceflowExternalIP(t *testing.T, data *TestData) { }, { Component: v1alpha1.ComponentForwarding, - ComponentInfo: "Output", + ComponentInfo: "L2ForwardingOut", Action: v1alpha1.ActionForwardedOutOfOverlay, }, }, diff --git a/test/integration/agent/openflow_test.go b/test/integration/agent/openflow_test.go index 4a67af35190..b44799b9a4c 100644 --- a/test/integration/agent/openflow_test.go +++ b/test/integration/agent/openflow_test.go @@ -126,6 +126,7 @@ func TestConnectivityFlows(t *testing.T) { }() config := prepareConfiguration() + t.Run("testInitialize", func(t *testing.T) { testInitialize(t, config) }) @@ -241,7 +242,8 @@ func TestReplayFlowsNetworkPolicyFlows(t *testing.T) { err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) - _, err = c.Initialize(roundInfo, &config1.NodeConfig{}, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) + config := prepareConfiguration() + _, err = c.Initialize(roundInfo, config.nodeConfig, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) require.Nil(t, err, "Failed to initialize OFClient") defer func() { @@ -294,14 +296,14 @@ func testExternalFlows(t *testing.T, config *testConfig) { t.Errorf("Failed to install OpenFlow entries to allow Pod to communicate to the external addresses: %v", err) } - gwMAC := config.nodeConfig.GatewayConfig.MAC - if config.nodeConfig.NodeIPv4Addr != nil && config.nodeConfig.PodIPv4CIDR != nil { - for _, tableFlow := range expectedExternalFlows(config.nodeConfig.NodeIPv4Addr.IP, config.nodeConfig.PodIPv4CIDR, gwMAC) { + gwMACStr := config.nodeConfig.GatewayConfig.MAC.String() + if config.enableIPv4 { + for _, tableFlow := range expectedExternalFlows("ip", gwMACStr) { ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableName, 0, true, tableFlow.flows) } } - if config.nodeConfig.NodeIPv6Addr != nil && config.nodeConfig.PodIPv6CIDR != nil { - for _, tableFlow := range expectedExternalFlows(config.nodeConfig.NodeIPv6Addr.IP, config.nodeConfig.PodIPv6CIDR, gwMAC) { + if config.enableIPv6 { + for _, tableFlow := range expectedExternalFlows("ipv6", gwMACStr) { ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableName, 0, true, tableFlow.flows) } } @@ -427,7 +429,8 @@ func TestNetworkPolicyFlows(t *testing.T) { err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) - _, err = c.Initialize(roundInfo, &config1.NodeConfig{PodIPv4CIDR: podIPv4CIDR, PodIPv6CIDR: podIPv6CIDR, GatewayConfig: gwConfig}, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) + config := prepareConfiguration() + _, err = c.Initialize(roundInfo, config.nodeConfig, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) require.Nil(t, err, "Failed to initialize OFClient") defer func() { @@ -579,7 +582,8 @@ func 
TestProxyServiceFlows(t *testing.T) { err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) - _, err = c.Initialize(roundInfo, &config1.NodeConfig{PodIPv4CIDR: podIPv4CIDR, PodIPv6CIDR: podIPv6CIDR, GatewayConfig: gwConfig}, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) + config := prepareConfiguration() + _, err = c.Initialize(roundInfo, config.nodeConfig, &config1.NetworkConfig{TrafficEncapMode: config1.TrafficEncapModeEncap}) require.Nil(t, err, "Failed to initialize OFClient") defer func() { @@ -698,20 +702,20 @@ func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList [ svcFlows := expectTableFlows{tableName: "ServiceLB", flows: []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,%s,reg4=0x10000/0x70000,nw_dst=%s,tp_dst=%d", string(svc.protocol), svc.ip.String(), svc.port), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[19],load:0x%x->NXM_NX_REG7[],group:%d", serviceLearnReg, gid, gid), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[11],load:0x%x->NXM_NX_REG7[],group:%d", serviceLearnReg, gid, gid), }, { MatchStr: fmt.Sprintf("priority=190,%s,reg4=0x30000/0x70000,nw_dst=%s,tp_dst=%d", string(svc.protocol), svc.ip.String(), svc.port), - ActStr: fmt.Sprintf("learn(table=SessionAffinity,hard_timeout=%d,priority=200,delete_learned,cookie=0x%x,eth_type=0x800,nw_proto=%d,%s,NXM_OF_IP_DST[],NXM_OF_IP_SRC[],load:NXM_NX_REG3[]->NXM_NX_REG3[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[19]),load:0x2->NXM_NX_REG4[16..18],goto_table:EndpointDNAT", stickyAge, cookieAllocator.RequestWithObjectID(4, gid).Raw(), nw_proto, learnProtoField), + ActStr: fmt.Sprintf("learn(table=SessionAffinity,hard_timeout=%d,priority=200,delete_learned,cookie=0x%x,eth_type=0x800,nw_proto=%d,%s,NXM_OF_IP_DST[],NXM_OF_IP_SRC[],load:NXM_NX_REG3[]->NXM_NX_REG3[],load:NXM_NX_REG4[0..15]->NXM_NX_REG4[0..15],load:0x2->NXM_NX_REG4[16..18],load:0x1->NXM_NX_REG0[11]),load:0x2->NXM_NX_REG4[16..18],goto_table:EndpointDNAT", stickyAge, cookieAllocator.RequestWithObjectID(4, gid).Raw(), nw_proto, learnProtoField), }, }} epDNATFlows := expectTableFlows{tableName: "EndpointDNAT", flows: []*ofTestUtils.ExpectFlow{}} - hairpinFlows := expectTableFlows{tableName: "HairpinSNAT", flows: []*ofTestUtils.ExpectFlow{}} + hairpinFlows := expectTableFlows{tableName: "ServiceHairpinMark", flows: []*ofTestUtils.ExpectFlow{}} groupBuckets = make([]string, 0) for _, ep := range endpointList { epIP := ipToHexString(net.ParseIP(ep.IP())) epPort, _ := ep.Port() - bucket := fmt.Sprintf("weight:100,actions=load:%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15],resubmit(,42)", epIP, epPort) + bucket := fmt.Sprintf("weight:100,actions=load:%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15],resubmit(,%d)", epIP, epPort, ofClient.EndpointDNATTable.GetID()) groupBuckets = append(groupBuckets, bucket) unionVal := (0b010 << 16) + uint32(epPort) @@ -723,7 +727,7 @@ func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList [ if ep.GetIsLocal() { hairpinFlows.flows = append(hairpinFlows.flows, &ofTestUtils.ExpectFlow{ MatchStr: fmt.Sprintf("priority=200,ip,nw_src=%s,nw_dst=%s", ep.IP(), ep.IP()), - ActStr: "set_field:169.254.169.252->ip_src,load:0x1->NXM_NX_REG0[18],goto_table:Output", + ActStr: "load:0x1->NXM_NX_REG0[9],load:0x1->NXM_NX_REG0[10],load:0->NXM_NX_REG4[22..23],goto_table:IPTTLDec", }) } } @@ -990,7 +994,7 @@ 
func prepareIPv6Configuration() *testConfig { MAC: gwMAC, } nodeConfig := &config1.NodeConfig{ - NodeIPv4Addr: nodeSubnet, + NodeIPv6Addr: nodeSubnet, GatewayConfig: gatewayConfig, PodIPv6CIDR: podIPv6CIDR, } @@ -1028,7 +1032,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, podIPv4 := util.GetIPv4Addr(podIPs) isAntreaFlexibleIPAM := connectUplinkToBridge && podIPv4 != nil && !nodeConfig.PodIPv4CIDR.Contains(podIPv4) actionAntreaFlexibleIPAMMarkString := "" - matchRewriteMACMarkString := ",reg0=0x80000/0x80000" + matchRewriteMACMarkString := ",reg0=0x800/0x800" if isAntreaFlexibleIPAM { actionAntreaFlexibleIPAMMarkString = ",load:0x1->NXM_NX_REG4[21]" matchRewriteMACMarkString = "" @@ -1048,7 +1052,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", podMAC.String()), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:IngressRule", podOFPort), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[8],goto_table:IngressClassifier", podOFPort), }, }, }, @@ -1060,7 +1064,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=210,in_port=%d,dl_dst=%s", 3, podMAC.String()), - ActStr: fmt.Sprintf("load:0x4->NXM_NX_REG0[0..3],goto_table:ServiceHairpin"), + ActStr: fmt.Sprintf("load:0x4->NXM_NX_REG0[0..3],goto_table:SNATConntrackZone"), }, }, }, @@ -1069,7 +1073,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=210,in_port=LOCAL,dl_dst=%s", podMAC.String()), - ActStr: fmt.Sprintf("load:0x5->NXM_NX_REG0[0..3],goto_table:ServiceHairpin"), + ActStr: fmt.Sprintf("load:0x5->NXM_NX_REG0[0..3],goto_table:SNATConntrackZone"), }, }, }}...) 
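One arithmetic note on the register matches in these expectations: a mark loaded at NXM_NX_REG0[n] is matched as reg0=(1<<n)/(1<<n), so the rewrite-MAC mark moving from bit 19 to bit 11 is exactly the switch from reg0=0x80000/0x80000 to reg0=0x800/0x800 seen in these hunks, and the mark at bit 16 likewise becomes bit 8 (0x10000 -> 0x100). As Go constants:

const (
	rewriteMACMaskOld = 1 << 19 // 0x80000, the pre-patch reg0 match
	rewriteMACMaskNew = 1 << 11 // 0x800, the post-patch reg0 match
)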
@@ -1084,7 +1088,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, nwDstField = "nw_dst" flows = append(flows, expectTableFlows{ - "SpoofGuard", + "ARPSpoofGuard", []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", podOFPort, podIP.String(), podMAC.String()), @@ -1092,7 +1096,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, }, }, }) - nextTableForSpoofguard = "ServiceHairpin" + nextTableForSpoofguard = "SNATConntrack" } else { ipProto = "ipv6" nwSrcField = "ipv6_src" @@ -1114,7 +1118,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,%s%s,%s=%s", ipProto, matchRewriteMACMarkString, nwDstField, podIP.String()), - ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,goto_table:IPTTLDec", gwMAC.String(), podMAC.String()), + ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,load:0x2->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark", gwMAC.String(), podMAC.String()), }, }, }, @@ -1140,7 +1144,7 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", gwMAC.String()), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:IngressMetric", config1.HostGatewayOFPort), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[8],goto_table:IngressClassifier", config1.HostGatewayOFPort), }, }, }, @@ -1157,15 +1161,21 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa "SpoofGuard", []*ofTestUtils.ExpectFlow{ { - MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", config1.HostGatewayOFPort, gwIP, gwMAC), - ActStr: "goto_table:ARPResponder", + MatchStr: fmt.Sprintf("priority=200,ip,in_port=%d", config1.HostGatewayOFPort), + ActStr: "goto_table:SNATConntrackZone", }, + }, + }, + expectTableFlows{ + "ARPSpoofGuard", + []*ofTestUtils.ExpectFlow{ { - MatchStr: fmt.Sprintf("priority=200,ip,in_port=%d", config1.HostGatewayOFPort), - ActStr: "goto_table:ServiceHairpin", + MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", config1.HostGatewayOFPort, gwIP, gwMAC), + ActStr: "goto_table:ARPResponder", }, }, - }) + }, + ) if connectUplinkToBridge { flows[len(flows)-1].flows = append(flows[len(flows)-1].flows, &ofTestUtils.ExpectFlow{ MatchStr: fmt.Sprintf("priority=200,arp,in_port=%d,arp_spa=%s,arp_sha=%s", config1.HostGatewayOFPort, nodeConfig.NodeIPv4Addr.IP.String(), gwMAC), @@ -1178,15 +1188,6 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa nwDstStr = "ipv6_dst" } flows = append(flows, - expectTableFlows{ - "L3Forwarding", - []*ofTestUtils.ExpectFlow{ - { - MatchStr: fmt.Sprintf("priority=200,%s,reg0=0x80000/0x80000,%s=%s", ipProtoStr, nwDstStr, gwIP.String()), - ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:L2Forwarding", gwMAC.String()), - }, - }, - }, expectTableFlows{ tableName: "IngressRule", flows: []*ofTestUtils.ExpectFlow{ @@ -1201,11 +1202,15 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=210,ct_state=+rpl+trk,ct_mark=0x2/0x2,%s,reg0=0x2/0xf", ipProtoStr), - ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:L2Forwarding", gwMAC.String()), + ActStr: 
fmt.Sprintf("set_field:%s->eth_dst,load:0x1->NXM_NX_REG0[4..7],goto_table:L2Forwarding", gwMAC.String()), }, { MatchStr: fmt.Sprintf("priority=210,ct_state=+rpl+trk,ct_mark=0x2/0x2,%s,reg0=0/0xf", ipProtoStr), - ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:L2Forwarding", gwMAC.String()), + ActStr: fmt.Sprintf("set_field:%s->eth_dst,load:0x1->NXM_NX_REG0[4..7],goto_table:L2Forwarding", gwMAC.String()), + }, + { + MatchStr: fmt.Sprintf("priority=200,%s,reg0=0x800/0x800,%s=%s", ipProtoStr, nwDstStr, gwIP.String()), + ActStr: fmt.Sprintf("set_field:%s->eth_dst,load:0x1->NXM_NX_REG0[4..7],goto_table:L2Forwarding", gwMAC.String()), }, }, }, @@ -1222,7 +1227,7 @@ func prepareTunnelFlows(tunnelPort uint32, vMAC net.HardwareAddr) []expectTableF []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,in_port=%d", tunnelPort), - ActStr: "load:0->NXM_NX_REG0[0..3],load:0x1->NXM_NX_REG0[19],goto_table:ConntrackZone", + ActStr: "load:0->NXM_NX_REG0[0..3],load:0x1->NXM_NX_REG0[11],goto_table:SNATConntrackZone", }, }, }, @@ -1231,7 +1236,7 @@ func prepareTunnelFlows(tunnelPort uint32, vMAC net.HardwareAddr) []expectTableF []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", vMAC.String()), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:IngressMetric", config1.DefaultTunOFPort), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[8],goto_table:IngressClassifier", config1.DefaultTunOFPort), }, }, }, @@ -1262,7 +1267,7 @@ func prepareNodeFlows(peerSubnet net.IPNet, peerGwIP, peerNodeIP net.IP, vMAC, l []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,%s,%s=%s", ipProtoStr, nwDstFieldName, peerSubnet.String()), - ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,set_field:%s->tun_dst,goto_table:IPTTLDec", localGwMAC.String(), vMAC.String(), peerNodeIP.String()), + ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,set_field:%s->tun_dst,load:0->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark", localGwMAC.String(), vMAC.String(), peerNodeIP.String()), }, }, }) @@ -1272,7 +1277,7 @@ func prepareNodeFlows(peerSubnet net.IPNet, peerGwIP, peerNodeIP net.IP, vMAC, l []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=210,%s,reg4=0x200000/0x200000,%s=%s", ipProtoStr, nwDstFieldName, peerSubnet.String()), - ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:L2Forwarding", peerNodeMAC.String()), + ActStr: fmt.Sprintf("set_field:%s->eth_dst,load:0x1->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark", peerNodeMAC.String()), }, }, }) @@ -1296,59 +1301,100 @@ func prepareServiceHelperFlows() []expectTableFlows { } func prepareDefaultFlows(config *testConfig) []expectTableFlows { - table20Flows := expectTableFlows{ + tableARPResponderFlows := expectTableFlows{ tableName: "ARPResponder", - flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "drop"}}, } - table31Flows := expectTableFlows{ + tableConntrackStateFlows := expectTableFlows{ tableName: "ConntrackState", flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "resubmit(,SessionAffinity),resubmit(,ServiceLB)"}}, } - table105Flows := expectTableFlows{ + tableConntrackCommitFlows := expectTableFlows{ tableName: "ConntrackCommit", - flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:HairpinSNAT"}}, + flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:L2Forwarding"}}, } - table72Flows := expectTableFlows{ + tableL3ForwardingFlows := 
expectTableFlows{
+ tableName: "L3Forwarding",
+ }
+ tableIPTTLDecFlows := expectTableFlows{
 tableName: "IPTTLDec",
- flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:L2Forwarding"}},
+ flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:SNAT"}},
+ }
+ tableSNATConntrackZoneFlows := expectTableFlows{
+ tableName: "SNATConntrackZone",
 }
- table30Flows := expectTableFlows{
+ tableConntrackZoneFlows := expectTableFlows{
 tableName: "ConntrackZone",
 }
+ tableServiceHairpinMarkFlows := expectTableFlows{
+ tableName: "ServiceHairpinMark",
+ flows: []*ofTestUtils.ExpectFlow{
+ {MatchStr: "priority=200,reg0=0x51/0xff", ActStr: "load:0x1->NXM_NX_REG0[9],load:0->NXM_NX_REG0[10],goto_table:IPTTLDec"},
+ {MatchStr: "priority=200,reg0=0x61/0xff", ActStr: "load:0x1->NXM_NX_REG0[9],load:0->NXM_NX_REG0[10],goto_table:IPTTLDec"},
+ {MatchStr: "priority=0", ActStr: "goto_table:IPTTLDec"},
+ },
+ }
 if config.enableIPv4 {
- table30Flows.flows = append(table30Flows.flows,
+ tableARPResponderFlows.flows = append(tableARPResponderFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=190,arp", ActStr: "NORMAL"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=0", ActStr: "drop"},
+ )
+ tableSNATConntrackZoneFlows.flows = append(tableSNATConntrackZoneFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ip", ActStr: "ct(table=ConntrackZone,zone=65521,nat)"},
+ )
+ tableConntrackZoneFlows.flows = append(tableConntrackZoneFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_mark=0x20/0x20,ip", ActStr: "load:0x1->NXM_NX_REG0[9],ct(table=ConntrackState,zone=65520,nat)"},
 &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ip", ActStr: "ct(table=ConntrackState,zone=65520,nat)"},
 )
- table31Flows.flows = append(table31Flows.flows,
+ tableConntrackStateFlows.flows = append(tableConntrackStateFlows.flows,
 &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ip", ActStr: "drop"},
 )
- table105Flows.flows = append(table105Flows.flows,
- &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x1/0xf", ActStr: "ct(commit,table=HairpinSNAT,zone=65520,exec(load:0x1->NXM_NX_CT_MARK[1])"},
- &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ip", ActStr: "ct(commit,table=HairpinSNAT,zone=65520)"},
+ tableConntrackCommitFlows.flows = append(tableConntrackCommitFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_mark=0x4/0x4,ip", ActStr: "goto_table:L2ForwardingOut"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x1/0xf", ActStr: "ct(commit,table=L2ForwardingOut,zone=65520,exec(load:0x1->NXM_NX_CT_MARK[1])"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ip", ActStr: "ct(commit,table=L2ForwardingOut,zone=65520)"},
 )
- table72Flows.flows = append(table72Flows.flows,
- &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ip,reg0=0x1/0xf", ActStr: "goto_table:L2Forwarding"},
- &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ip", ActStr: "dec_ttl,goto_table:L2Forwarding"},
+ nodeIPStr := config.nodeConfig.NodeIPv4Addr.String()
+ podCIDR := config.nodeConfig.PodIPv4CIDR.String()
+ tableL3ForwardingFlows.flows = append(tableL3ForwardingFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=200,ip,reg0=0/0x800,nw_dst=%s", nodeIPStr), ActStr: "load:0x5->NXM_NX_REG0[4..7],goto_table:L2Forwarding"},
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=200,ip,reg0=0x800/0x800,nw_dst=%s", nodeIPStr), ActStr: "load:0x5->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark"},
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=190,ip,reg0=0x800/0x800,nw_dst=%s", podCIDR), ActStr: "load:0x1->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark"},
+ )
+ tableIPTTLDecFlows.flows = append(tableIPTTLDecFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ip,reg0=0x1/0xf", ActStr: "goto_table:SNAT"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ip", ActStr: "dec_ttl,goto_table:SNAT"},
 )
 }
 if config.enableIPv6 {
- table30Flows.flows = append(table30Flows.flows,
+ tableSNATConntrackZoneFlows.flows = append(tableSNATConntrackZoneFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ipv6", ActStr: "ct(table=ConntrackZone,zone=65511,nat)"},
+ )
+ tableConntrackZoneFlows.flows = append(tableConntrackZoneFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_mark=0x20/0x20,ipv6", ActStr: "load:0x1->NXM_NX_REG0[9],ct(table=ConntrackState,zone=65510,nat)"},
 &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ipv6", ActStr: "ct(table=ConntrackState,zone=65510,nat)"},
 )
- table31Flows.flows = append(table31Flows.flows,
+ tableConntrackStateFlows.flows = append(tableConntrackStateFlows.flows,
 &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ipv6", ActStr: "drop"},
 )
- table105Flows.flows = append(table105Flows.flows,
- &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ipv6,reg0=0x1/0xf", ActStr: "ct(commit,table=HairpinSNAT,zone=65510,exec(load:0x1->NXM_NX_CT_MARK[1])"},
- &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ipv6", ActStr: "ct(commit,table=HairpinSNAT,zone=65510)"},
+ tableConntrackCommitFlows.flows = append(tableConntrackCommitFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ct_mark=0x4/0x4,ipv6", ActStr: "goto_table:L2ForwardingOut"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ipv6,reg0=0x1/0xf", ActStr: "ct(commit,table=L2ForwardingOut,zone=65510,exec(load:0x1->NXM_NX_CT_MARK[1])"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ipv6", ActStr: "ct(commit,table=L2ForwardingOut,zone=65510)"},
 )
- table72Flows.flows = append(table72Flows.flows,
- &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ipv6,reg0=0x1/0xf", ActStr: "goto_table:L2Forwarding"},
- &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ipv6", ActStr: "dec_ttl,goto_table:L2Forwarding"},
+ nodeIPStr := config.nodeConfig.NodeIPv6Addr.String()
+ podCIDR := config.nodeConfig.PodIPv6CIDR.String()
+ tableL3ForwardingFlows.flows = append(tableL3ForwardingFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=200,ipv6,reg0=0/0x800,nw_dst=%s", nodeIPStr), ActStr: "load:0x5->NXM_NX_REG0[4..7],goto_table:L2Forwarding"},
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=200,ipv6,reg0=0x800/0x800,nw_dst=%s", nodeIPStr), ActStr: "load:0x5->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark"},
+ &ofTestUtils.ExpectFlow{MatchStr: fmt.Sprintf("priority=190,ipv6,reg0=0x800/0x800,nw_dst=%s", podCIDR), ActStr: "load:0x1->NXM_NX_REG0[4..7],goto_table:ServiceHairpinMark"},
+ )
+ tableIPTTLDecFlows.flows = append(tableIPTTLDecFlows.flows,
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ipv6,reg0=0x1/0xf", ActStr: "goto_table:SNAT"},
+ &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ipv6", ActStr: "dec_ttl,goto_table:SNAT"},
 )
 }
- if config.connectUplinkToBridge {
- table20Flows.flows = append(table20Flows.flows,
+ if config.enableIPv4 && config.connectUplinkToBridge {
+ tableARPResponderFlows.flows =
append(tableARPResponderFlows.flows,
 &ofTestUtils.ExpectFlow{
 MatchStr: fmt.Sprintf("priority=200,arp,arp_tpa=%s,arp_op=1", config.nodeConfig.GatewayConfig.IPv4.String()),
 ActStr: fmt.Sprintf("move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],set_field:%s->eth_src,load:0x2->NXM_OF_ARP_OP[],move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],set_field:%s->arp_sha,move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],set_field:%s->arp_spa,IN_PORT", config.nodeConfig.GatewayConfig.MAC.String(), config.nodeConfig.GatewayConfig.MAC.String(), config.nodeConfig.GatewayConfig.IPv4.String()),
@@ -1356,7 +1402,14 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows {
 )
 }
 return []expectTableFlows{
- table20Flows, table30Flows, table31Flows, table105Flows, table72Flows,
+ tableARPResponderFlows,
+ tableConntrackZoneFlows,
+ tableConntrackStateFlows,
+ tableConntrackCommitFlows,
+ tableL3ForwardingFlows,
+ tableIPTTLDecFlows,
+ tableSNATConntrackZoneFlows,
+ tableServiceHairpinMarkFlows,
 {
 "Classification",
 []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "drop"}},
@@ -1365,13 +1417,6 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows {
 "SpoofGuard",
 []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "drop"}},
 },
- {
- "ARPResponder",
- []*ofTestUtils.ExpectFlow{
- {MatchStr: "priority=190,arp", ActStr: "NORMAL"},
- {MatchStr: "priority=0", ActStr: "drop"},
- },
- },
 {
 "EndpointDNAT",
 []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:EgressRule"}},
@@ -1390,10 +1435,14 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows {
 },
 {
 "L3Forwarding",
- []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:L2Forwarding"}},
+ []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "load:0x6->NXM_NX_REG0[4..7],goto_table:L2Forwarding"}},
 },
 {
 "L2Forwarding",
+ []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:IngressClassifier"}},
+ },
+ {
+ "IngressClassifier",
 []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:IngressMetric"}},
 },
 {
@@ -1409,9 +1458,9 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows {
 []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:ConntrackCommit"}},
 },
 {
- "Output",
+ "L2ForwardingOut",
 []*ofTestUtils.ExpectFlow{
- {MatchStr: "priority=200,ip,reg0=0x10000/0x10000", ActStr: "output:NXM_NX_REG1[]"},
+ {MatchStr: "priority=200,reg0=0x100/0x100", ActStr: "output:NXM_NX_REG1[]"},
 },
 },
 }
@@ -1435,35 +1484,14 @@ func prepareIPNetAddresses(addresses []string) []types.Address {
 return ipAddresses
 }
-func expectedExternalFlows(nodeIP net.IP, localSubnet *net.IPNet, gwMAC net.HardwareAddr) []expectTableFlows {
- var ipProtoStr, nwDstFieldName string
- if localSubnet.IP.To4() != nil {
- ipProtoStr = "ip"
- nwDstFieldName = "nw_dst"
- } else {
- ipProtoStr = "ipv6"
- nwDstFieldName = "ipv6_dst"
- }
+func expectedExternalFlows(ipProtoStr, gwMACStr string) []expectTableFlows {
 return []expectTableFlows{
 {
- // snatCommonFlows()
 "L3Forwarding",
 []*ofTestUtils.ExpectFlow{
 {
- MatchStr: fmt.Sprintf("priority=200,%s,reg0=0/0x80000,%s=%s", ipProtoStr, nwDstFieldName, localSubnet.String()),
- ActStr: "goto_table:L2Forwarding",
- },
- {
- MatchStr: fmt.Sprintf("priority=200,%s,reg0=0x2/0xf,%s=%s", ipProtoStr, nwDstFieldName, nodeIP.String()),
- ActStr: "goto_table:L2Forwarding",
- },
- {
- MatchStr: fmt.Sprintf("priority=190,%s,reg0=0x2/0xf", ipProtoStr),
- ActStr: "goto_table:SNAT",
- },
- {
- MatchStr: fmt.Sprintf("priority=190,%s,reg0=0/0xf", ipProtoStr),
- ActStr:
fmt.Sprintf("set_field:%s->eth_dst,goto_table:SNAT", gwMAC.String()), + MatchStr: "priority=0", + ActStr: "load:0x6->NXM_NX_REG0[4..7],load:0x1->NXM_NX_REG4[24],goto_table:SNAT", }, }, }, @@ -1471,12 +1499,16 @@ func expectedExternalFlows(nodeIP net.IP, localSubnet *net.IPNet, gwMAC net.Hard "SNAT", []*ofTestUtils.ExpectFlow{ { - MatchStr: fmt.Sprintf("priority=190,ct_state=+new+trk,%s,reg0=0/0xf", ipProtoStr), + MatchStr: fmt.Sprintf("priority=190,ct_state=+new+trk,%s,reg0=0/0xf,reg4=0x1000000/0x1000000", ipProtoStr), ActStr: "drop", }, + { + MatchStr: fmt.Sprintf("priority=200,ct_state=-new+trk,%s,reg0=0/0xf,reg4=0x1000000/0x1000000", ipProtoStr), + ActStr: fmt.Sprintf("set_field:%s->eth_dst,goto_table:L2Forwarding", gwMACStr), + }, { MatchStr: "priority=0", - ActStr: "goto_table:L2Forwarding", + ActStr: "goto_table:SNATConntrackCommit", }, }, }, @@ -1497,16 +1529,16 @@ func prepareSNATFlows(snatIP net.IP, mark, podOFPort, podOFPortRemote uint32, vM "SNAT", []*ofTestUtils.ExpectFlow{ { - MatchStr: fmt.Sprintf("priority=200,ct_state=+new+trk,%s,%s=%s", ipProtoStr, tunDstFieldName, snatIP), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_PKT_MARK[0..7],goto_table:IPTTLDec", mark), + MatchStr: fmt.Sprintf("priority=200,ct_state=+new+trk,%s,reg4=0x1000000/0x1000000,%s=%s", ipProtoStr, tunDstFieldName, snatIP), + ActStr: fmt.Sprintf("set_field:%s->eth_dst,load:0x%x->NXM_NX_PKT_MARK[0..7],goto_table:L2Forwarding", localGwMAC.String(), mark), }, { - MatchStr: fmt.Sprintf("priority=200,ct_state=+new+trk,%s,in_port=%d", ipProtoStr, podOFPort), + MatchStr: fmt.Sprintf("priority=200,ct_state=+new+trk,%s,reg4=0x1000000/0x1000000,in_port=%d", ipProtoStr, podOFPort), ActStr: fmt.Sprintf("load:0x%x->NXM_NX_PKT_MARK[0..7],goto_table:L2Forwarding", mark), }, { - MatchStr: fmt.Sprintf("priority=200,%s,in_port=%d", ipProtoStr, podOFPortRemote), - ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,set_field:%s->%s,goto_table:IPTTLDec", localGwMAC.String(), vMAC.String(), snatIP, tunDstFieldName), + MatchStr: fmt.Sprintf("priority=200,%s,reg4=0x1000000/0x1000000,in_port=%d", ipProtoStr, podOFPortRemote), + ActStr: fmt.Sprintf("set_field:%s->eth_src,set_field:%s->eth_dst,set_field:%s->%s,goto_table:L2Forwarding", localGwMAC.String(), vMAC.String(), snatIP, tunDstFieldName), }, }, }, diff --git a/test/integration/ovs/ofctrl_test.go b/test/integration/ovs/ofctrl_test.go index 0509eb9f638..87681286351 100644 --- a/test/integration/ovs/ofctrl_test.go +++ b/test/integration/ovs/ofctrl_test.go @@ -66,13 +66,14 @@ var ( peerGW = net.ParseIP("192.168.2.1") vMAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:ff") - ipDSCP = uint8(10) - - t0 = binding.NewOFTable(0, "t0") - t1 = binding.NewOFTable(1, "t1") - t2 = binding.NewOFTable(2, "t2") - t3 = binding.NewOFTable(3, "t3") - t4 = binding.NewOFTable(4, "t4") + ipDSCP = uint8(10) + pipelineID = binding.NewPipelineID() + + t0 = binding.NewOFTable(0, "t0", binding.ClassifierStage, pipelineID) + t1 = binding.NewOFTable(1, "t1", binding.ClassifierStage, pipelineID) + t2 = binding.NewOFTable(2, "t2", binding.ClassifierStage, pipelineID) + t3 = binding.NewOFTable(3, "t3", binding.ClassifierStage, pipelineID) + t4 = binding.NewOFTable(4, "t4", binding.ClassifierStage, pipelineID) ) func newOFBridge(brName string) binding.Bridge { @@ -128,7 +129,7 @@ func prepareOverlapFlows(table binding.Table, ipStr string, sameCookie bool) ([] table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(cookie2). MatchSrcIP(srcIP). - Action().GotoTable(table.GetNext()). 
+ Action().NextTable(). Done(), } expectFlows := []*ExpectFlow{ @@ -441,7 +442,7 @@ func TestBundleErrorWhenOVSRestart(t *testing.T) { flows := []binding.Flow{table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). MatchInPort(uint32(count + 1)). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done()} err = bridge.AddFlowsInBundle(flows, nil, nil) if err != nil { @@ -643,7 +644,7 @@ func TestPacketOutIn(t *testing.T) { Action().LoadToRegField(reg2Field, reg2Data). Action().LoadToRegField(reg3Field, reg3Data). Action().SetTunnelDst(tunDst). - Action().ResubmitToTable(table0.GetNext()). + Action().ResubmitToTables(table0.GetNext()). Done() flow1 := table1.BuildFlow(100). MatchSrcMAC(srcMAC).MatchDstMAC(dstcMAC). @@ -680,14 +681,14 @@ func TestTLVMap(t *testing.T) { time.Sleep(1 * time.Second) flow1 := table.BuildFlow(100). MatchProtocol(binding.ProtocolIP).MatchTunMetadata(0, 0x1234). - Action().ResubmitToTable(table.GetNext()). + Action().NextTable(). Done() err = bridge.AddFlowsInBundle([]binding.Flow{flow1}, nil, nil) require.Nil(t, err) expectedFlows := []*ExpectFlow{ { MatchStr: "priority=100,ip,tun_metadata0=0x1234", - ActStr: fmt.Sprintf("resubmit(,%d)", table.GetNext()), + ActStr: fmt.Sprintf("goto_table:%d", table.GetNext()), }, } ovsCtlClient := ovsctl.NewClient(br) @@ -713,14 +714,14 @@ func TestMoveTunMetadata(t *testing.T) { flow1 := table.BuildFlow(100). MatchProtocol(binding.ProtocolIP).MatchTunMetadata(0, 0x1234). Action().MoveRange("NXM_NX_TUN_METADATA0", "NXM_NX_REG0", binding.Range{28, 31}, binding.Range{28, 31}). - Action().ResubmitToTable(table.GetNext()). + Action().NextTable(). Done() err = bridge.AddFlowsInBundle([]binding.Flow{flow1}, nil, nil) require.Nil(t, err) expectedFlows := []*ExpectFlow{ { MatchStr: "priority=100,ip,tun_metadata0=0x1234", - ActStr: fmt.Sprintf("move:NXM_NX_TUN_METADATA0[28..31]->NXM_NX_REG0[28..31],resubmit(,%d)", table.GetNext()), + ActStr: fmt.Sprintf("move:NXM_NX_TUN_METADATA0[28..31]->NXM_NX_REG0[28..31],goto_table:%d", table.GetNext()), }, } ovsCtlClient := ovsctl.NewClient(br) @@ -754,7 +755,7 @@ func TestFlowWithCTMatchers(t *testing.T) { MatchCTSrcPort(ctPortSrc). MatchCTDstPort(ctPortDst). MatchCTProtocol(binding.ProtocolTCP). - Action().ResubmitToTable(table.GetNext()). + Action().NextTable(). Done() flow2 := table.BuildFlow(priority). MatchProtocol(binding.ProtocolIP). @@ -762,17 +763,17 @@ func TestFlowWithCTMatchers(t *testing.T) { MatchCTSrcIPNet(*ctIPSrcNet). MatchCTDstIPNet(*ctIPDstNet). MatchCTProtocol(binding.ProtocolTCP). - Action().ResubmitToTable(table.GetNext()). + Action().NextTable(). Done() expectFlows := []*ExpectFlow{ {fmt.Sprintf("priority=%d,ct_state=+new,ct_nw_src=%s,ct_nw_dst=%s,ct_nw_proto=6,ct_tp_src=%d,ct_tp_dst=%d,ip", priority, ctIPSrc.String(), ctIPDst.String(), ctPortSrc, ctPortDst), - fmt.Sprintf("resubmit(,%d)", table.GetNext()), + fmt.Sprintf("goto_table:%d", table.GetNext()), }, { fmt.Sprintf("priority=%d,ct_state=+est,ct_nw_src=%s,ct_nw_dst=%s,ct_nw_proto=6,ip", priority, ctIPSrcNet.String(), ctIPDstNet.String()), - fmt.Sprintf("resubmit(,%d)", table.GetNext()), + fmt.Sprintf("goto_table:%d", table.GetNext()), }, } for _, f := range []binding.Flow{flow1, flow2} { @@ -808,7 +809,7 @@ func TestNoteAction(t *testing.T) { MatchProtocol(binding.ProtocolIP). MatchSrcIP(srcIP). Action().Note(testNotes). - Action().GotoTable(table.GetNext()). + Action().NextTable(). 
Done() convertNoteToHex := func(note string) string { @@ -855,21 +856,21 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { Cookie(getCookieID()). MatchInPort(podOFport). Action().LoadRegMark(fromLocalMark). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP). Cookie(getCookieID()). MatchInPort(podOFport). MatchARPSha(podMAC). MatchARPSpa(podIP). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). MatchInPort(podOFport). MatchSrcMAC(podMAC). MatchSrcIP(podIP). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP). Cookie(getCookieID()). @@ -896,7 +897,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { LoadFieldToField(regField0, regField0). LoadRegMark(mark1). Done(). // Finish learn action. - Action().ResubmitToTable(table.GetID()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). @@ -907,7 +908,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { MatchRegMark(fromGatewayMark). MatchCTMark(gatewayCTMark). MatchCTStateNew(false).MatchCTStateTrk(true). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). @@ -924,7 +925,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { MatchCTMark(gatewayCTMark). MatchCTStateNew(false).MatchCTStateTrk(true). Action().LoadRange(binding.NxmFieldDstMAC, gwMACData, &binding.Range{0, 47}). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). @@ -943,7 +944,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { Action().SetSrcMAC(gwMAC). Action().SetDstMAC(podMAC). Action().DecTTL(). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). @@ -952,7 +953,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { Action().SetSrcMAC(gwMAC). Action().SetDstMAC(vMAC). Action().SetTunnelDst(tunnelPeer). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIPv6). Cookie(getCookieID()). @@ -961,20 +962,20 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { Action().SetSrcMAC(gwMAC). Action().SetDstMAC(vMAC). Action().SetTunnelDst(tunnelPeerIPv6). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP). Cookie(getCookieID()). MatchDstIP(gwIP). Action().SetDstMAC(gwMAC). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal). Cookie(getCookieID()). MatchDstMAC(podMAC). Action().LoadToRegField(portCacheField, podOFport). Action().LoadRegMark(portFoundMark). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal). Cookie(getCookieID()). @@ -991,7 +992,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { MatchProtocol(binding.ProtocolIP). MatchSrcIP(podIP). 
MatchIPDSCP(ipDSCP). - Action().GotoTable(table.GetNext()). + Action().NextTable(). Done(), table.BuildFlow(priorityNormal+20).MatchProtocol(binding.ProtocolTCP).Cookie(getCookieID()).MatchDstPort(uint16(8080), nil). Action().Conjunction(uint32(1001), uint8(3), uint8(3)).Done(), @@ -1004,9 +1005,9 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { table.BuildFlow(priorityNormal+20).MatchProtocol(binding.ProtocolIP).Cookie(getCookieID()).MatchRegFieldWithValue(portCacheField, podOFport). Action().Conjunction(uint32(1001), uint8(2), uint8(3)).Done(), table.BuildFlow(priorityNormal+20).MatchProtocol(binding.ProtocolIP).Cookie(getCookieID()).MatchConjID(1001). - Action().GotoTable(table.GetNext()).Done(), + Action().NextTable().Done(), table.BuildFlow(priorityNormal+20).MatchProtocol(binding.ProtocolIP).Cookie(getCookieID()).MatchConjID(1001).MatchSrcIP(gwIP). - Action().GotoTable(table.GetNext()).Done(), + Action().NextTable().Done(), ) gotoTableAction := fmt.Sprintf("goto_table:%d", table.GetNext()) @@ -1017,7 +1018,7 @@ func prepareFlows(table binding.Table) ([]binding.Flow, []*ExpectFlow) { &ExpectFlow{"priority=200,ip,in_port=3,dl_src=aa:aa:aa:aa:aa:13,nw_src=192.168.1.3", gotoTableAction}, &ExpectFlow{"priority=200,arp,arp_tpa=192.168.2.1,arp_op=1", "move:NXM_OF_ETH_SRC[]->NXM_OF_ETH_DST[],set_field:aa:bb:cc:dd:ee:ff->eth_src,load:0x2->NXM_OF_ARP_OP[],move:NXM_NX_ARP_SHA[]->NXM_NX_ARP_THA[],set_field:aa:bb:cc:dd:ee:ff->arp_sha,move:NXM_OF_ARP_SPA[]->NXM_OF_ARP_TPA[],set_field:192.168.2.1->arp_spa,IN_PORT"}, &ExpectFlow{"priority=190,arp", "NORMAL"}, - &ExpectFlow{"priority=200,tcp", fmt.Sprintf("learn(table=%d,idle_timeout=10,priority=190,delete_learned,cookie=0x1,eth_type=0x800,nw_proto=6,NXM_OF_TCP_DST[],NXM_NX_REG0[0..15]=0xfff,load:NXM_NX_REG0[0..15]->NXM_NX_REG0[0..15],load:0xffe->NXM_NX_REG0[16..31]),resubmit(,%d)", table.GetID(), table.GetID())}, + &ExpectFlow{"priority=200,tcp", fmt.Sprintf("learn(table=%d,idle_timeout=10,priority=190,delete_learned,cookie=0x1,eth_type=0x800,nw_proto=6,NXM_OF_TCP_DST[],NXM_NX_REG0[0..15]=0xfff,load:NXM_NX_REG0[0..15]->NXM_NX_REG0[0..15],load:0xffe->NXM_NX_REG0[16..31]),goto_table:%d", table.GetID(), table.GetNext())}, &ExpectFlow{"priority=200,ip", fmt.Sprintf("ct(table=%d,zone=65520)", table.GetNext())}, &ExpectFlow{"priority=210,ct_state=-new+trk,ct_mark=0x2/0x2,ip,reg0=0x1/0xffff", gotoTableAction}, &ExpectFlow{"priority=200,ct_state=+new+trk,ip,reg0=0x1/0xffff", fmt.Sprintf("ct(commit,table=%d,zone=65520,exec(load:0x1->NXM_NX_CT_MARK[1])", table.GetNext())},
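// Note on the value/mask pairs asserted throughout these expectations: a mark
// written with load:VALUE->NXM_NX_REG0[LO..HI] is printed by
// `ovs-ofctl dump-flows` as reg0=(VALUE<<LO)/MASK, where
// MASK = ((1<<(HI-LO+1))-1)<<LO (non-zero values in hex, zero as a bare 0).
// A minimal sketch of the arithmetic, with a hypothetical helper name:
//
//	func regMatch(reg int, lo, hi uint, value uint32) string {
//		mask := uint32(1<<(hi-lo+1)-1) << lo
//		return fmt.Sprintf("reg%d=%#x/%#x", reg, value<<lo, mask)
//	}
//
// For example, regMatch(0, 8, 8, 0x1) yields "reg0=0x100/0x100" (the output
// mark checked in L2ForwardingOut), regMatch(0, 11, 11, 0x1) yields
// "reg0=0x800/0x800" (the rewrite-MAC mark set by the tunnel classifier), and
// regMatch(0, 4, 7, 0x5) yields "reg0=0x50/0xf0" (the 4-bit field loaded by
// the new L3Forwarding flows).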