diff --git a/go-controller/pkg/kube/kube.go b/go-controller/pkg/kube/kube.go index e976fd7808..2c2e72621d 100644 --- a/go-controller/pkg/kube/kube.go +++ b/go-controller/pkg/kube/kube.go @@ -241,7 +241,6 @@ func (k *Kube) UpdateEgressIP(eIP *egressipv1.EgressIP) error { } func (k *Kube) PatchEgressIP(name string, patchData []byte) error { - klog.Infof("Patching status on EgressIP %s", name) _, err := k.EIPClient.K8sV1().EgressIPs().Patch(context.TODO(), name, types.JSONPatchType, patchData, metav1.PatchOptions{}) return err } diff --git a/go-controller/pkg/ovn/egressip.go b/go-controller/pkg/ovn/egressip.go index bbae33c977..efd9953ab0 100644 --- a/go-controller/pkg/ovn/egressip.go +++ b/go-controller/pkg/ovn/egressip.go @@ -865,9 +865,6 @@ func (oc *Controller) addPodEgressIPAssignments(name string, statusAssignments [ podIPs: logicalPort.ips, } oc.eIPC.podAssignment[podKey] = podState - if err := oc.eIPC.deletePerPodGRSNAT(pod, logicalPort.ips); err != nil { - return err - } } else { for _, status := range statusAssignments { if _, exists := podState.egressStatuses[status]; !exists { @@ -912,13 +909,11 @@ func (oc *Controller) deleteEgressIPAssignments(name string, statusesToRemove [] } for podKey, podStatus := range oc.eIPC.podAssignment { delete(podStatus.egressStatuses, statusToRemove) - if len(podStatus.egressStatuses) == 0 { - podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) - if err := oc.eIPC.addPerPodGRSNAT(podNamespace, podName, podStatus.podIPs); err != nil { - return err - } - delete(oc.eIPC.podAssignment, podKey) + podNamespace, podName := getPodNamespaceAndNameFromKey(podKey) + if err := oc.eIPC.addPerPodGRSNAT(podNamespace, podName, podStatus.podIPs); err != nil { + return err } + delete(oc.eIPC.podAssignment, podKey) } } return nil @@ -962,9 +957,6 @@ func (oc *Controller) deletePodEgressIPAssignments(name string, statusesToRemove } delete(podStatus.egressStatuses, statusToRemove) } - if len(podStatus.egressStatuses) > 0 { - return nil - } if err := oc.eIPC.addPerPodGRSNAT(pod.Namespace, pod.Name, podStatus.podIPs); err != nil { return err } @@ -1243,6 +1235,7 @@ type EgressIPPatchStatus struct { // object update which risks resetting the EgressIP object's fields to the state // they had when we started processing the change. func (oc *Controller) patchReplaceEgressIPStatus(name string, statusItems []egressipv1.EgressIPStatusItem) error { + klog.Infof("Patching status on EgressIP %s: %v", name, statusItems) return retry.RetryOnConflict(retry.DefaultRetry, func() error { t := []EgressIPPatchStatus{ { @@ -1680,6 +1673,9 @@ type egressIPController struct { // (routing pod traffic to the egress node) and NAT objects on the egress node // (SNAT-ing to the egress IP). 
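// Note: when DisableSNATMultipleGWs is set, the pre-existing SNAT to->nodeIP is removed before the SNAT to->egressIP is added, and only when the pod's own node is the egress node serving this status (see deletePerPodGRSNAT below).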
func (e *egressIPController) addPodEgressIPAssignment(egressIPName string, status egressipv1.EgressIPStatusItem, pod *kapi.Pod, podIPs []*net.IPNet) (err error) { + if err := e.deletePerPodGRSNAT(pod, podIPs, status); err != nil { + return err + } if err := e.handleEgressReroutePolicy(podIPs, status, egressIPName, e.createEgressReroutePolicy); err != nil { return fmt.Errorf("unable to create logical router policy, err: %v", err) } @@ -1733,9 +1729,10 @@ func (e *egressIPController) addPerPodGRSNAT(podNamespace, podName string, podIP return nil } -func (e *egressIPController) deletePerPodGRSNAT(pod *kapi.Pod, podIPs []*net.IPNet) error { - if config.Gateway.DisableSNATMultipleGWs { - // remove snats to->nodeIP (from the node where pod exists) for these podIPs before adding the snat to->egressIP +func (e *egressIPController) deletePerPodGRSNAT(pod *kapi.Pod, podIPs []*net.IPNet, status egressipv1.EgressIPStatusItem) error { + if config.Gateway.DisableSNATMultipleGWs && status.Node == pod.Spec.NodeName { + // remove snats to->nodeIP (from the node where the pod exists, but only if that node is also + // serving as an egress node for this pod) for these podIPs before adding the snat to->egressIP extIPs, err := getExternalIPsGRSNAT(e.watchFactory, pod.Spec.NodeName) if err != nil { return err @@ -1744,6 +1741,9 @@ func (e *egressIPController) deletePerPodGRSNAT(pod *kapi.Pod, podIPs []*net.IPN if err != nil { return err } + } else if config.Gateway.DisableSNATMultipleGWs { + // the node hosting the pod differs from the egress node that is managing the pod, so keep its snat to->nodeIP + klog.V(5).Infof("Not deleting SNAT on %s since egress node managing %s/%s is %s", pod.Spec.NodeName, pod.Namespace, pod.Name, status.Node) } return nil } diff --git a/go-controller/pkg/ovn/egressip_test.go b/go-controller/pkg/ovn/egressip_test.go index e5e998690f..2341aac10d 100644 --- a/go-controller/pkg/ovn/egressip_test.go +++ b/go-controller/pkg/ovn/egressip_test.go @@ -47,7 +47,7 @@ const ( v6NodeSubnet = "ae70::66/64" v4ClusterSubnet = "10.128.0.0/14" v4NodeSubnet = "10.128.0.0/24" - podName = "egress_pod" + podName = "egress-pod" egressIPName = "egressip" ) @@ -4001,6 +4001,690 @@ var _ = ginkgo.Describe("OVN master EgressIP Operations", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) + ginkgo.It("should ensure SNATs towards egressIP and nodeIP are correctly configured during egressIP re-assignment", func() { + app.Action = func(ctx *cli.Context) error { + config.Gateway.DisableSNATMultipleGWs = true + + egressIP1 := "192.168.126.101" + egressIP2 := "192.168.126.102" + node1IPv4 := "192.168.126.12/24" + node2IPv4 := "192.168.126.51/24" + + egressPod1 := *newPodWithLabels(namespace, podName, node1Name, podV4IP, egressPodLabel) + egressPod2 := *newPodWithLabels(namespace, "egress-pod2", node2Name, "10.128.0.16", egressPodLabel) + egressNamespace := newNamespace(namespace) + + node1 := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node1Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\"}", node1IPv4), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\"}", v4NodeSubnet), + "k8s.ovn.org/l3-gateway-config": `{"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.12/24", "next-hop":"192.168.126.1"}}`, + "k8s.ovn.org/node-chassis-id": "79fdcfc4-6fe6-4cd3-8242-c0f85a4668ec", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + }, + }, + }, + } + node2 := 
v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: node2Name, + Annotations: map[string]string{ + "k8s.ovn.org/node-primary-ifaddr": fmt.Sprintf("{\"ipv4\": \"%s\"}", node2IPv4), + "k8s.ovn.org/node-subnets": fmt.Sprintf("{\"default\":\"%s\"}", v4NodeSubnet), + "k8s.ovn.org/l3-gateway-config": `{"default":{"mode":"local","mac-address":"7e:57:f8:f0:3c:49", "ip-address":"192.168.126.51/24", "next-hop":"192.168.126.1"}}`, + "k8s.ovn.org/node-chassis-id": "89fdcfc4-6fe6-4cd3-8242-c0f85a4668ec", + }, + }, + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ + { + Type: v1.NodeReady, + Status: v1.ConditionTrue, + }, + }, + }, + } + + eIP := egressipv1.EgressIP{ + ObjectMeta: newEgressIPMeta(egressIPName), + Spec: egressipv1.EgressIPSpec{ + EgressIPs: []string{egressIP1, egressIP2}, + PodSelector: metav1.LabelSelector{ + MatchLabels: egressPodLabel, + }, + NamespaceSelector: metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": egressNamespace.Name, + }, + }, + }, + Status: egressipv1.EgressIPStatus{ + Items: []egressipv1.EgressIPStatusItem{}, + }, + } + + fakeOvn.startWithDBSetup( + libovsdbtest.TestSetup{ + NBData: []libovsdbtest.TestData{ + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + }, + }, + }, + }, + &egressipv1.EgressIPList{ + Items: []egressipv1.EgressIP{eIP}, + }, + &v1.NodeList{ + Items: []v1.Node{node1, node2}, + }, + &v1.NamespaceList{ + Items: []v1.Namespace{*egressNamespace}, + }, + &v1.PodList{ + Items: []v1.Pod{egressPod1, egressPod2}, + }, + ) + + i, n, _ := net.ParseCIDR(podV4IP + "/23") + n.IP = i + fakeOvn.controller.logicalPortCache.add("", util.GetLogicalPortName(egressPod1.Namespace, egressPod1.Name), "", nil, []*net.IPNet{n}) + i, n, _ = net.ParseCIDR("10.128.0.16" + "/23") + n.IP = i + fakeOvn.controller.logicalPortCache.add("", util.GetLogicalPortName(egressPod2.Namespace, egressPod2.Name), "", nil, []*net.IPNet{n}) + + fakeOvn.controller.WatchEgressIPNamespaces() + fakeOvn.controller.WatchEgressIPPods() + fakeOvn.controller.WatchEgressNodes() + fakeOvn.controller.WatchEgressIP() + + expectedDatabaseState := []libovsdbtest.TestData{ + 
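+ // Before any node is labelled egress-assignable, only the two default no-reroute policies and the base topology are expected in the NB database.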
&nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + }, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(0)) + gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeFalse()) + gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeFalse()) + + node1.Labels = map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + + _, err := fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Update(context.TODO(), &node1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeTrue()) + gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeFalse()) + gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) + gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(1)) + eips, nodes := getEgressIPStatus(egressIPName) + gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) + + expectedNatLogicalPort1 := "k8s-node1" + expectedDatabaseState = []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: 
nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2"}, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID1", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2"}, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID2", + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", "reroute-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Nat: []string{"egressip-nat-UUID1", "egressip-nat-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + }, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + node2.Labels = map[string]string{ + "k8s.ovn.org/egress-assignable": "", + } + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Update(context.TODO(), &node2, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Eventually(getEgressIPAllocatorSizeSafely).Should(gomega.Equal(2)) + gomega.Expect(fakeOvn.controller.eIPC.allocator.cache).To(gomega.HaveKey(node1.Name)) + 
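// node2 should now be tracked in the egress IP allocator cache as well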
gomega.Expect(fakeOvn.controller.eIPC.allocator.cache).To(gomega.HaveKey(node2.Name)) + gomega.Eventually(isEgressAssignableNode(node1.Name)).Should(gomega.BeTrue()) + gomega.Eventually(isEgressAssignableNode(node2.Name)).Should(gomega.BeTrue()) + gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(2)) + gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(0)) + + eips, nodes = getEgressIPStatus(egressIPName) + gomega.Expect(nodes[0]).To(gomega.Equal(node1.Name)) + gomega.Expect(nodes[1]).To(gomega.Equal(node2.Name)) + + expectedNatLogicalPort2 := "k8s-node2" + expectedDatabaseState = []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2", "100.64.0.3"}, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID1", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: []string{"100.64.0.2", "100.64.0.3"}, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID2", + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID3", + LogicalIP: podV4IP, + ExternalIP: eips[1], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort2, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID4", + LogicalIP: "10.128.0.16", + ExternalIP: eips[1], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort2, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", "reroute-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Nat: []string{"egressip-nat-UUID1", "egressip-nat-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Nat: []string{"egressip-nat-UUID3", "egressip-nat-UUID4"}, + }, + &nbdb.LogicalRouterPort{ + UUID: 
ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + // remove label from node2 + node2.Labels = map[string]string{} + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Update(context.TODO(), &node2, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(1)) + gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(1)) + + expectedDatabaseState = []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod1.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID1", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.EgressIPReroutePriority, + Match: fmt.Sprintf("ip4.src == %s", egressPod2.Status.PodIP), + Action: nbdb.LogicalRouterPolicyActionReroute, + Nexthops: nodeLogicalRouterIPv4, + ExternalIDs: map[string]string{ + "name": eIP.Name, + }, + UUID: "reroute-UUID2", + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID2", + LogicalIP: "10.128.0.16", + ExternalIP: eips[0], + ExternalIDs: map[string]string{ + "name": egressIPName, + }, + Type: nbdb.NATTypeSNAT, + LogicalPort: &expectedNatLogicalPort1, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID3", + LogicalIP: "10.128.0.16", + ExternalIP: "192.168.126.51", // adds back SNAT 
towards nodeIP + Type: nbdb.NATTypeSNAT, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID", "reroute-UUID1", "reroute-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Nat: []string{"egressip-nat-UUID1", "egressip-nat-UUID2"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Nat: []string{"egressip-nat-UUID3"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + "nat-addresses": "router", + "exclude-lb-vips-from-garp": "true", + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + }, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + // remove label from node1 + node1.Labels = map[string]string{} + + _, err = fakeOvn.fakeClient.KubeClient.CoreV1().Nodes().Update(context.TODO(), &node1, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + gomega.Eventually(getEgressIPStatusLen(egressIPName)).Should(gomega.Equal(0)) + gomega.Eventually(getEgressIPReassignmentCount).Should(gomega.Equal(1)) // although 2 egressIPs need re-assignment, there is only 1 EgressIP object + + expectedDatabaseState = []libovsdbtest.TestData{ + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: "ip4.src == 10.128.0.0/14 && ip4.dst == 10.128.0.0/14", + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-UUID", + }, + &nbdb.LogicalRouterPolicy{ + Priority: types.DefaultNoRereoutePriority, + Match: fmt.Sprintf("ip4.src == 10.128.0.0/14 && ip4.dst == %s", config.Gateway.V4JoinSubnet), + Action: nbdb.LogicalRouterPolicyActionAllow, + UUID: "no-reroute-service-UUID", + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID1", + LogicalIP: podV4IP, + ExternalIP: "192.168.126.12", // adds back SNAT towards nodeIP + Type: nbdb.NATTypeSNAT, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.NAT{ + UUID: "egressip-nat-UUID3", + LogicalIP: "10.128.0.16", + ExternalIP: "192.168.126.51", + Type: nbdb.NATTypeSNAT, + Options: map[string]string{ + "stateless": "false", + }, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.OVNClusterRouter, + UUID: ovntypes.OVNClusterRouter + "-UUID", + Policies: []string{"no-reroute-UUID", "no-reroute-service-UUID"}, + }, + 
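+ // With no egress nodes remaining, each gateway router is expected to carry only the SNAT back to its own node IP.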
&nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node1.Name, + UUID: ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Nat: []string{"egressip-nat-UUID1"}, + }, + &nbdb.LogicalRouter{ + Name: ovntypes.GWRouterPrefix + node2.Name, + UUID: ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Nat: []string{"egressip-nat-UUID3"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node2.Name, + Networks: []string{"100.64.0.3/29"}, + }, + &nbdb.LogicalRouterPort{ + UUID: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name + "-UUID", + Name: ovntypes.GWRouterToJoinSwitchPrefix + ovntypes.GWRouterPrefix + node1.Name, + Networks: []string{"100.64.0.2/29"}, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node1Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node1Name, + }, + }, + &nbdb.LogicalSwitchPort{ + UUID: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name + "UUID", + Name: types.EXTSwitchToGWRouterPrefix + types.GWRouterPrefix + node2Name, + Type: "router", + Options: map[string]string{ + "router-port": types.GWRouterToExtSwitchPrefix + "GR_" + node2Name, + }, + }, + } + gomega.Eventually(fakeOvn.nbClient).Should(libovsdbtest.HaveData(expectedDatabaseState)) + + return nil + } + + err := app.Run([]string{app.Name}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + }) + ginkgo.It("should re-balance EgressIPs when their node is removed", func() { app.Action = func(ctx *cli.Context) error { diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index f1f1ddb486..07caeb4783 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -401,6 +401,15 @@ func getApiAddress() string { return apiServer.String() } +// IsGatewayModeLocal returns true if the gateway mode is local +func IsGatewayModeLocal() bool { + anno, err := framework.RunKubectl("default", "get", "node", "ovn-control-plane", "-o", "template", "--template={{.metadata.annotations}}") + if err != nil { + return false + } + return strings.Contains(anno, "local") +} + // runCommand runs the cmd and returns the combined stdout and stderr func runCommand(cmd ...string) (string, error) { output, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput() @@ -883,7 +892,14 @@ var _ = ginkgo.Describe("e2e egress IP validation", func() { } return false, nil } - targetNodeLogs, err := runCommand("docker", "logs", targetNode.name) + var targetNodeLogs string + if strings.Contains(targetNode.name, "-host-net-pod") { + // host-networked-pod + targetNodeLogs, err = framework.RunKubectl(podNamespace, "logs", targetNode.name) + } else { + // external container + targetNodeLogs, err = runCommand("docker", "logs", targetNode.name) + } if err != nil { framework.Logf("failed to inspect logs in test container: %v", err) return false, nil @@ -1160,6 +1176,184 @@ spec: framework.ExpectNoError(err, "Step 18. Check connectivity from the remaining pod to an external \"node\" and verify that the IP is the remaining egress IP, failed, err: %v", err) }) + // Validate the egress IP by creating a httpd container on the kind networking + // (effectively seen as "outside" the cluster) and curl it from a pod in the cluster + // which matches the egress IP stanza. 
The aim is to check that the SNATs towards the nodeIP and the + // SNATs towards the egressIPs are correctly deleted and recreated. + // NOTE: See https://bugzilla.redhat.com/show_bug.cgi?id=2078222 for details on inconsistency + + /* This test does the following: + 0. Add the "k8s.ovn.org/egress-assignable" label to egress1Node + 1. Create a host-networked pod on the non-egress node (egress2Node) to act as "another node" + 2. Create an EgressIP object with one egress IP defined + 3. Check that the status is of length one and that it is assigned to egress1Node + 4. Create one pod matching the EgressIP: running on egress1Node + 5. Check connectivity from pod to an external "node" and verify that the srcIP is the expected egressIP + 6. Check connectivity from pod to another node (egress2Node) and verify that the srcIP is the expected [egressIP if SGW] & [nodeIP if LGW] + 7. Add the "k8s.ovn.org/egress-assignable" label to egress2Node + 8. Remove the "k8s.ovn.org/egress-assignable" label from egress1Node + 9. Check that the status is of length one and that it is assigned to egress2Node + 10. Check connectivity from pod to an external "node" and verify that the srcIP is the expected egressIP + 11. Check connectivity from pod to another node (egress2Node) and verify that the srcIP is the expected nodeIP (known inconsistency compared to step 6 for SGW) + 12. Create second pod not matching the EgressIP: running on egress1Node + 13. Check connectivity from second pod to external node and verify that the srcIP is the expected nodeIP + 14. Add pod selector label to make second pod egressIP managed + 15. Check connectivity from second pod to external node and verify that the srcIP is the expected egressIP + 16. Check connectivity from second pod to another node (egress2Node) and verify that the srcIP is the expected nodeIP (this verifies SNATs towards nodeIP are not deleted for pods unless the pod is on its own egressNode) + */ + ginkgo.It("Should validate the egress IP SNAT functionality against host-networked pods", func() { + + command := []string{"/agnhost", "netexec", fmt.Sprintf("--http-port=%s", podHTTPPort)} + + ginkgo.By("0. Add the \"k8s.ovn.org/egress-assignable\" label to egress1Node") + framework.AddOrUpdateLabelOnNode(f.ClientSet, egress1Node.name, "k8s.ovn.org/egress-assignable", "dummy") + framework.Logf("Added egress-assignable label to node %s", egress1Node.name) + framework.ExpectNodeHasLabel(f.ClientSet, egress1Node.name, "k8s.ovn.org/egress-assignable", "dummy") + + ginkgo.By("1. Create a host-networked pod on the non-egress node to act as \"another node\"") + _, err := createPod(f, egress2Node.name+"-host-net-pod", egress2Node.name, f.Namespace.Name, []string{}, map[string]string{}, func(p *v1.Pod) { + p.Spec.HostNetwork = true + p.Spec.Containers[0].Image = "docker.io/httpd" + }) + framework.ExpectNoError(err) + hostNetPod := node{ + name: egress2Node.name + "-host-net-pod", + nodeIP: egress2Node.nodeIP, + } + framework.Logf("Created pod %s on node %s", hostNetPod.name, egress2Node.name) + + podNamespace := f.Namespace + podNamespace.Labels = map[string]string{ + "name": f.Namespace.Name, + } + updateNamespace(f, podNamespace) + + ginkgo.By("2. Create an EgressIP object with one egress IP defined") + dupIP := func(ip net.IP) net.IP { + dup := make(net.IP, len(ip)) + copy(dup, ip) + return dup + } + // Assign the egress IP without conflicting with any node IP; + // the kind subnet is /16 or /64, so the following should be fine. 
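+ // e.g. if the node IP is 172.18.0.2 (the default kind IPv4 subnet), incrementing the second-to-last byte below yields 172.18.1.2, which stays inside the subnet but should not collide with any node IP.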
+ egressNodeIP := net.ParseIP(egress1Node.nodeIP) + egressIP1 := dupIP(egressNodeIP) + egressIP1[len(egressIP1)-2]++ + + var egressIPConfig = fmt.Sprintf(`apiVersion: k8s.ovn.org/v1 +kind: EgressIP +metadata: + name: ` + svcname + ` +spec: + egressIPs: + - ` + egressIP1.String() + ` + podSelector: + matchLabels: + wants: egress + namespaceSelector: + matchLabels: + name: ` + f.Namespace.Name + ` +`) + if err := ioutil.WriteFile(egressIPYaml, []byte(egressIPConfig), 0644); err != nil { + framework.Failf("Unable to write CRD config to disk: %v", err) + } + defer func() { + if err := os.Remove(egressIPYaml); err != nil { + framework.Logf("Unable to remove the CRD config from disk: %v", err) + } + }() + + framework.Logf("Create the EgressIP configuration") + framework.RunKubectlOrDie("default", "create", "-f", egressIPYaml) + + ginkgo.By("3. Check that the status is of length one and that it is assigned to egress1Node") + statuses := verifyEgressIPStatusLengthEquals(1) + if statuses[0].Node != egress1Node.name { + framework.Failf("Step 3. Check that the status is of length one and that it is assigned to egress1Node, failed") + } + + ginkgo.By("4. Create one pod matching the EgressIP: running on egress1Node") + createGenericPodWithLabel(f, pod1Name, pod1Node.name, f.Namespace.Name, command, podEgressLabel) + + err = wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) { + kubectlOut := getPodAddress(pod1Name, f.Namespace.Name) + srcIP := net.ParseIP(kubectlOut) + if srcIP == nil { + return false, nil + } + return true, nil + }) + framework.ExpectNoError(err, "Step 4. Create one pod matching the EgressIP: running on egress1Node, failed, err: %v", err) + framework.Logf("Created pod %s on node %s", pod1Name, pod1Node.name) + + ginkgo.By("5. Check connectivity from pod to an external node and verify that the srcIP is the expected egressIP") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(targetNode, pod1Name, podNamespace.Name, true, []string{egressIP1.String()})) + framework.ExpectNoError(err, "Step 5. Check connectivity from pod to an external node and verify that the srcIP is the expected egressIP, failed: %v", err) + + ginkgo.By("6. Check connectivity from pod to another node and verify that the srcIP is the expected [egressIP if SGW] & [nodeIP if LGW]") + var verifyIP string + if IsGatewayModeLocal() { + verifyIP = egressNodeIP.String() + } else { + verifyIP = egressIP1.String() + } + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(hostNetPod, pod1Name, podNamespace.Name, true, []string{verifyIP})) + framework.ExpectNoError(err, "Step 6. Check connectivity from pod to another node and verify that the srcIP is the expected [egressIP if SGW] & [nodeIP if LGW], failed: %v", err) + + ginkgo.By("7. Add the \"k8s.ovn.org/egress-assignable\" label to egress2Node") + framework.AddOrUpdateLabelOnNode(f.ClientSet, egress2Node.name, "k8s.ovn.org/egress-assignable", "dummy") + framework.Logf("Added egress-assignable label to node %s", egress2Node.name) + framework.ExpectNodeHasLabel(f.ClientSet, egress2Node.name, "k8s.ovn.org/egress-assignable", "dummy") + + ginkgo.By("8. Remove the \"k8s.ovn.org/egress-assignable\" label from egress1Node") + framework.RemoveLabelOffNode(f.ClientSet, egress1Node.name, "k8s.ovn.org/egress-assignable") + + ginkgo.By("9. 
Check that the status is of length one and that it is assigned to egress2Node") + statuses = verifyEgressIPStatusLengthEquals(1) + // There is sometimes a slight delay before the EgressIP fails over, + // so poll with wait.PollImmediate until egress2Node eventually becomes the egress node, + // re-fetching the status on every iteration + err = wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) { + statuses = verifyEgressIPStatusLengthEquals(1) + return statuses[0].Node == egress2Node.name, nil + }) + framework.ExpectNoError(err, "Step 9. Check that the status is of length one and that it is assigned to egress2Node, failed: %v", err) + + ginkgo.By("10. Check connectivity from pod to an external \"node\" and verify that the srcIP is the expected egressIP") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(targetNode, pod1Name, podNamespace.Name, true, []string{egressIP1.String()})) + framework.ExpectNoError(err, "Step 10. Check connectivity from pod to an external \"node\" and verify that the srcIP is the expected egressIP, failed, err: %v", err) + + ginkgo.By("11. Check connectivity from pod to another node and verify that the srcIP is the expected nodeIP (known inconsistency compared to step 6 for SGW)") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(hostNetPod, pod1Name, podNamespace.Name, true, []string{egressNodeIP.String()})) + framework.ExpectNoError(err, "Step 11. Check connectivity from pod to another node and verify that the srcIP is the expected nodeIP (known inconsistency compared to step 6), failed: %v", err) + + ginkgo.By("12. Create second pod not matching the EgressIP: running on egress1Node") + createGenericPodWithLabel(f, pod2Name, pod1Node.name, f.Namespace.Name, command, map[string]string{}) + err = wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) { + kubectlOut := getPodAddress(pod2Name, f.Namespace.Name) + srcIP := net.ParseIP(kubectlOut) + if srcIP == nil { + return false, nil + } + return true, nil + }) + framework.ExpectNoError(err, "Step 12. Create second pod not matching the EgressIP: running on egress1Node, failed, err: %v", err) + framework.Logf("Created pod %s on node %s", pod2Name, pod1Node.name) + + ginkgo.By("13. Check connectivity from second pod to external node and verify that the srcIP is the expected nodeIP") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(targetNode, pod2Name, podNamespace.Name, true, []string{egressNodeIP.String()})) + framework.ExpectNoError(err, "Step 13. Check connectivity from second pod to external node and verify that the srcIP is the expected nodeIP, failed: %v", err) + + ginkgo.By("14. Add pod selector label to make second pod egressIP managed") + pod2 := getPod(f, pod2Name) + pod2.Labels = podEgressLabel + updatePod(f, pod2) + + ginkgo.By("15. Check connectivity from second pod to external node and verify that the srcIP is the expected egressIP") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(targetNode, pod2Name, podNamespace.Name, true, []string{egressIP1.String()})) + framework.ExpectNoError(err, "Step 15. Check connectivity from second pod to external node and verify that the srcIP is the expected egressIP, failed: %v", err) + + ginkgo.By("16. 
Check connectivity from second pod to another node and verify that the srcIP is the expected nodeIP (this verifies SNATs towards nodeIP are not deleted unless the node is an egress node)") + err = wait.PollImmediate(retryInterval, retryTimeout, targetExternalContainerAndTest(hostNetPod, pod2Name, podNamespace.Name, true, []string{egressNodeIP.String()})) + framework.ExpectNoError(err, "Step 16. Check connectivity from second pod to another node and verify that the srcIP is the expected nodeIP (this verifies SNATs towards nodeIP are not deleted unless the node is an egress node), failed: %v", err) + }) + // Validate the egress IP works with egress firewall by creating two httpd // containers on the kind networking (effectively seen as "outside" the cluster) // and curl them from a pod in the cluster which matches the egress IP stanza.