Skip to content

Commit

Permalink
add u2o test case (#2203)
Browse files Browse the repository at this point in the history
* add u2o test case
  • Loading branch information
changluyi committed Jan 6, 2023
1 parent f5d8011 commit 67024ec
Showing 1 changed file with 310 additions and 3 deletions.
313 changes: 310 additions & 3 deletions test/e2e/kube-ovn/underlay/underlay.go
Expand Up @@ -2,17 +2,19 @@ package underlay

import (
"fmt"
"net"
"os/exec"
"strconv"
"strings"
"time"

dockertypes "github.com/docker/docker/api/types"
"github.com/onsi/ginkgo/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"

"github.com/onsi/ginkgo/v2"

apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
"github.com/kubeovn/kube-ovn/pkg/util"
"github.com/kubeovn/kube-ovn/test/e2e/framework"
Expand All @@ -22,6 +24,7 @@ import (
)

// dockerNetworkName is the docker (kind) bridge network used as the underlay provider network.
const dockerNetworkName = "kube-ovn-vlan"

// curlListenPort is the HTTP port the agnhost "netexec" container listens on in the test pods.
const curlListenPort = 8081

func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, linkMap map[string]*iproute.Link) *apiv1.ProviderNetwork {
var defaultInterface string
Expand All @@ -48,7 +51,7 @@ var _ = framework.Describe("[group:underlay]", func() {
var itFn func(bool)
var cs clientset.Interface
var nodeNames []string
var clusterName, providerNetworkName, vlanName, subnetName, namespaceName, podName string
var clusterName, providerNetworkName, vlanName, subnetName, podName, namespaceName, u2oPodNameUnderlay, u2oOverlaySubnetName, u2oPodNameOverlay string
var linkMap map[string]*iproute.Link
var routeMap map[string][]iproute.Route
var eventClient *framework.EventClient
Expand Down Expand Up @@ -76,6 +79,9 @@ var _ = framework.Describe("[group:underlay]", func() {
if image == "" {
image = framework.GetKubeOvnImage(cs)
}
u2oPodNameUnderlay = ""
u2oOverlaySubnetName = ""
u2oPodNameOverlay = ""

if skip {
ginkgo.Skip("underlay spec only runs on kind clusters")
Expand Down Expand Up @@ -261,6 +267,17 @@ var _ = framework.Describe("[group:underlay]", func() {
ginkgo.By("Deleting pod " + podName)
podClient.DeleteSync(podName)

if u2oPodNameUnderlay != "" {
ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay)
podClient.DeleteSync(u2oPodNameUnderlay)

ginkgo.By("Deleting overlay pod " + u2oPodNameOverlay)
podClient.DeleteSync(u2oPodNameOverlay)

ginkgo.By("Deleting subnet " + u2oOverlaySubnetName)
subnetClient.DeleteSync(u2oOverlaySubnetName)
}

ginkgo.By("Deleting subnet " + subnetName)
subnetClient.DeleteSync(subnetName)

Expand Down Expand Up @@ -417,4 +434,294 @@ var _ = framework.Describe("[group:underlay]", func() {
}
framework.ExpectTrue(found, "Address conflict should be reported in pod events")
})

// End-to-end check of underlay-to-overlay (U2O) subnet interconnection:
// creates an underlay pod and an overlay pod, then toggles U2O on, off and
// on again — also across a kube-ovn-controller restart — verifying subnet
// status, router policies, pod routes and reachability via checkU2OItems.
// NOTE(review): the spec name carries a trailing space; presumably
// unintentional, but changing it would change the reported test name.
framework.ConformanceIt("should support underlay to overlay subnet interconnection ", func() {
	ginkgo.By("Creating provider network")
	pn := makeProviderNetwork(providerNetworkName, false, linkMap)
	_ = providerNetworkClient.CreateSync(pn)

	ginkgo.By("Getting docker network " + dockerNetworkName)
	network, err := docker.NetworkInspect(dockerNetworkName)
	framework.ExpectNoError(err, "getting docker network "+dockerNetworkName)

	ginkgo.By("Creating vlan " + vlanName)
	vlan := framework.MakeVlan(vlanName, providerNetworkName, 0)
	_ = vlanClient.Create(vlan)

	ginkgo.By("Creating underlay subnet " + subnetName)
	// Build the underlay CIDR/gateway lists from the docker network's IPAM
	// config, honoring the cluster IP family (skip v4 when ipv6-only, etc.).
	// NOTE(review): this loop reads dockerNetwork while the inspect result
	// above is stored in network — presumably dockerNetwork is a suite-level
	// variable populated elsewhere in the file; confirm against full source.
	underlayCidr := make([]string, 0, 2)
	gateway := make([]string, 0, 2)
	for _, config := range dockerNetwork.IPAM.Config {
		switch util.CheckProtocol(config.Subnet) {
		case apiv1.ProtocolIPv4:
			if f.ClusterIpFamily != "ipv6" {
				underlayCidr = append(underlayCidr, config.Subnet)
				gateway = append(gateway, config.Gateway)
			}
		case apiv1.ProtocolIPv6:
			if f.ClusterIpFamily != "ipv4" {
				underlayCidr = append(underlayCidr, config.Subnet)
				gateway = append(gateway, config.Gateway)
			}
		}
	}

	// Exclude addresses already held by containers attached to the docker
	// network so kube-ovn does not hand them out to pods.
	excludeIPs := make([]string, 0, len(network.Containers)*2)
	for _, container := range network.Containers {
		if container.IPv4Address != "" && f.ClusterIpFamily != "ipv6" {
			excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0])
		}
		if container.IPv6Address != "" && f.ClusterIpFamily != "ipv4" {
			excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0])
		}
	}

	// U2OInterconnection enables the underlay↔overlay interconnection under test.
	subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(underlayCidr, ","), strings.Join(gateway, ","), excludeIPs, nil, []string{namespaceName})
	subnet.Spec.U2OInterconnection = true
	_ = subnetClient.CreateSync(subnet)

	ginkgo.By("Creating underlay subnet pod")
	annotations := map[string]string{
		util.LogicalSwitchAnnotation: subnetName,
	}

	u2oPodNameUnderlay = "pod-" + framework.RandomSuffix()
	// agnhost netexec serves HTTP (including /clientip) on curlListenPort;
	// checkReachable relies on that endpoint.
	args := []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)}
	underlayPod := framework.MakePod(namespaceName, u2oPodNameUnderlay, nil, annotations, framework.AgnhostImage, nil, args)
	underlayPod.Spec.Containers[0].ImagePullPolicy = corev1.PullIfNotPresent

	// Keep a pristine copy so the pod can be recreated after each delete below.
	originUnderlayPod := underlayPod.DeepCopy()
	underlayPod = podClient.CreateSync(underlayPod)

	// get subnet again because ipam change
	subnet = subnetClient.Get(subnetName)

	ginkgo.By("Creating overlay subnet")
	u2oOverlaySubnetName = "subnet-" + framework.RandomSuffix()
	cidr := framework.RandomCIDR(f.ClusterIpFamily)

	overlaySubnet := framework.MakeSubnet(u2oOverlaySubnetName, "", cidr, "", nil, nil, nil)
	overlaySubnet = subnetClient.CreateSync(overlaySubnet)

	ginkgo.By("Creating overlay subnet pod")
	u2oPodNameOverlay = "pod-" + framework.RandomSuffix()
	overlayAnnotations := map[string]string{
		util.LogicalSwitchAnnotation: overlaySubnet.Name,
	}
	args = []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)}
	overlayPod := framework.MakePod(namespaceName, u2oPodNameOverlay, nil, overlayAnnotations, framework.AgnhostImage, nil, args)
	overlayPod.Spec.Containers[0].ImagePullPolicy = corev1.PullIfNotPresent
	overlayPod = podClient.CreateSync(overlayPod)

	ginkgo.By("step1: Enable u2o check")
	checkU2OItems(true, subnet, underlayPod, overlayPod)

	ginkgo.By("step2: Disable u2o check")
	// The underlay pod is deleted and recreated around each U2O toggle so it
	// picks up the routes corresponding to the new subnet configuration.
	podClient.DeleteSync(u2oPodNameUnderlay)

	subnet = subnetClient.Get(subnetName)
	modifiedSubnet := subnet.DeepCopy()
	modifiedSubnet.Spec.U2OInterconnection = false
	subnetClient.PatchSync(subnet, modifiedSubnet)
	// NOTE(review): fixed sleep to let the controller reconcile the subnet;
	// polling the subnet status would be less flaky — TODO consider.
	time.Sleep(5 * time.Second)

	underlayPod = podClient.CreateSync(originUnderlayPod)
	subnet = subnetClient.Get(subnetName)
	checkU2OItems(false, subnet, underlayPod, overlayPod)

	ginkgo.By("step3: recover enable u2o check")
	podClient.DeleteSync(u2oPodNameUnderlay)

	subnet = subnetClient.Get(subnetName)
	modifiedSubnet = subnet.DeepCopy()
	modifiedSubnet.Spec.U2OInterconnection = true
	subnetClient.PatchSync(subnet, modifiedSubnet)
	time.Sleep(5 * time.Second)
	underlayPod = podClient.CreateSync(originUnderlayPod)
	subnet = subnetClient.Get(subnetName)
	checkU2OItems(true, subnet, underlayPod, overlayPod)

	ginkgo.By("step4: check if kube-ovn-controller restart")
	// Restart the controller and verify the U2O state survives reconciliation.
	restartCmd := "kubectl rollout restart deployment kube-ovn-controller -n kube-system"
	_, err = exec.Command("bash", "-c", restartCmd).CombinedOutput()
	framework.ExpectNoError(err, "restart kube-ovn-controller")
	checkU2OItems(true, subnet, underlayPod, overlayPod)

	ginkgo.By("step5: Disable u2o check after restart kube-controller")
	podClient.DeleteSync(u2oPodNameUnderlay)

	subnet = subnetClient.Get(subnetName)
	modifiedSubnet = subnet.DeepCopy()
	modifiedSubnet.Spec.U2OInterconnection = false
	subnetClient.PatchSync(subnet, modifiedSubnet)
	time.Sleep(5 * time.Second)
	underlayPod = podClient.CreateSync(originUnderlayPod)
	subnet = subnetClient.Get(subnetName)
	checkU2OItems(false, subnet, underlayPod, overlayPod)

	ginkgo.By("step6: recover enable u2o check after restart kube-controller")
	podClient.DeleteSync(u2oPodNameUnderlay)

	subnet = subnetClient.Get(subnetName)
	modifiedSubnet = subnet.DeepCopy()
	modifiedSubnet.Spec.U2OInterconnection = true
	subnetClient.PatchSync(subnet, modifiedSubnet)
	time.Sleep(5 * time.Second)
	underlayPod = podClient.CreateSync(originUnderlayPod)
	subnet = subnetClient.Get(subnetName)
	checkU2OItems(true, subnet, underlayPod, overlayPod)
})
})

// checkU2OItems verifies the state that should (or should not) exist while U2O
// interconnection is enabled (isEnableU2O) for the given underlay subnet:
//   - subnet status: interconnection IP presence and per-protocol using-IP counts;
//   - ovn-cluster logical router policies for each of the subnet's CIDRs;
//   - the underlay pod's default-route nexthop (interconnection IP vs. gateway);
//   - bidirectional pod-to-pod HTTP reachability between underlay and overlay pods.
func checkU2OItems(isEnableU2O bool, subnet *apiv1.Subnet, underlayPod, overlayPod *corev1.Pod) {

	ginkgo.By("checking underlay subnet's u2o interconnect ip")
	if isEnableU2O {
		framework.ExpectTrue(subnet.Spec.U2OInterconnection)
		framework.ExpectIPInCIDR(subnet.Status.U2OInterconnectionIP, subnet.Spec.CIDRBlock)
	} else {
		framework.ExpectFalse(subnet.Spec.U2OInterconnection)
		framework.ExpectEmpty(subnet.Status.U2OInterconnectionIP)
	}

	v4gw, v6gw := util.SplitStringIP(subnet.Spec.Gateway)

	underlayCidr := strings.Split(subnet.Spec.CIDRBlock, ",")
	for _, cidr := range underlayCidr {
		var protocolStr, gw string
		if util.CheckProtocol(cidr) == apiv1.ProtocolIPv4 {
			protocolStr = "ip4"
			gw = v4gw
			ginkgo.By("checking underlay subnet's using ips")
			// 2 vs 1 used IPs: presumably the extra address with U2O on is the
			// interconnection IP itself — TODO confirm against IPAM behavior.
			if isEnableU2O {
				framework.ExpectEqual(int(subnet.Status.V4UsingIPs), 2)
			} else {
				framework.ExpectEqual(int(subnet.Status.V4UsingIPs), 1)
			}
		} else {
			protocolStr = "ip6"
			gw = v6gw
			if isEnableU2O {
				framework.ExpectEqual(int(subnet.Status.V6UsingIPs), 2)
			} else {
				framework.ExpectEqual(int(subnet.Status.V6UsingIPs), 1)
			}
		}
		// Name of the OVN address set holding the U2O exclude IPs; '-' is mapped
		// to '.' — presumably because OVN address-set names cannot contain '-'.
		agName := strings.Replace(fmt.Sprintf("%s.u2o_exclude_ip.%s", subnet.Name, protocolStr), "-", ".", -1)
		ginkgo.By(fmt.Sprintf("checking underlay subnet's policy1 route %s", protocolStr))
		// Policy 1: traffic from the underlay CIDR to the exclude-IP set is allowed.
		hitPolicyStr := fmt.Sprintf("%d %s.dst == $%s && %s.src == %s allow", util.SubnetRouterPolicyPriority, protocolStr, agName, protocolStr, cidr)
		checkPolicy(hitPolicyStr, isEnableU2O)

		ginkgo.By(fmt.Sprintf("checking underlay subnet's policy2 route %s", protocolStr))
		// Policy 2: traffic to the underlay CIDR (except the exclude set) is allowed.
		hitPolicyStr = fmt.Sprintf("%d %s.dst == %s && %s.dst != $%s allow", util.SubnetRouterPolicyPriority, protocolStr, cidr, protocolStr, agName)
		checkPolicy(hitPolicyStr, isEnableU2O)

		ginkgo.By(fmt.Sprintf("checking underlay subnet's policy3 route %s", protocolStr))
		// Policy 3: traffic from the underlay CIDR is rerouted to the subnet gateway.
		hitPolicyStr = fmt.Sprintf("%d %s.src == %s reroute %s", util.GatewayRouterPolicyPriority, protocolStr, cidr, gw)
		checkPolicy(hitPolicyStr, isEnableU2O)
	}

	ginkgo.By("checking underlay pod's ip route's nexthop equal the u2o interconnection ip")
	// Read the pod's routing table by running `ip route` inside it via kubectl exec.
	routes, err := iproute.RouteShow("", "eth0", func(cmd ...string) ([]byte, []byte, error) {
		return framework.KubectlExec(underlayPod.Namespace, underlayPod.Name, cmd...)
	})
	framework.ExpectNoError(err)
	framework.ExpectNotEmpty(routes)

	v4InterconnIp, v6InterconnIp := util.SplitStringIP(subnet.Status.U2OInterconnectionIP)

	// With U2O enabled the default route must point at the interconnection IP;
	// with it disabled, at the physical gateway.
	isV4DefaultRouteExist := false
	isV6DefaultRouteExist := false
	for _, route := range routes {
		if route.Dst == "default" {
			if util.CheckProtocol(route.Gateway) == apiv1.ProtocolIPv4 {
				if isEnableU2O {
					framework.ExpectEqual(route.Gateway, v4InterconnIp)
				} else {
					framework.ExpectEqual(route.Gateway, v4gw)
				}
				isV4DefaultRouteExist = true
			} else {
				if isEnableU2O {
					framework.ExpectEqual(route.Gateway, v6InterconnIp)
				} else {
					framework.ExpectEqual(route.Gateway, v6gw)
				}
				isV6DefaultRouteExist = true
			}
		}
	}

	// A default route must exist for every protocol family the subnet carries.
	if subnet.Spec.Protocol == apiv1.ProtocolIPv4 {
		framework.ExpectTrue(isV4DefaultRouteExist)
	} else if subnet.Spec.Protocol == apiv1.ProtocolIPv6 {
		framework.ExpectTrue(isV6DefaultRouteExist)
	} else if subnet.Spec.Protocol == apiv1.ProtocolDual {
		framework.ExpectTrue(isV4DefaultRouteExist)
		framework.ExpectTrue(isV6DefaultRouteExist)
	}

	// Split each pod's addresses by protocol so reachability is checked per family.
	UPodIPs := underlayPod.Status.PodIPs
	OPodIPs := overlayPod.Status.PodIPs
	var v4UPodIP, v4OPodIP, v6UPodIP, v6OPodIP string
	for _, UPodIP := range UPodIPs {
		if util.CheckProtocol(UPodIP.IP) == apiv1.ProtocolIPv4 {
			v4UPodIP = UPodIP.IP
		} else {
			v6UPodIP = UPodIP.IP
		}
	}
	for _, OPodIP := range OPodIPs {
		if util.CheckProtocol(OPodIP.IP) == apiv1.ProtocolIPv4 {
			v4OPodIP = OPodIP.IP
		} else {
			v6OPodIP = OPodIP.IP
		}
	}

	// Cross-subnet reachability must hold exactly when U2O is enabled.
	if v4UPodIP != "" && v4OPodIP != "" {
		ginkgo.By("checking underlay pod access to overlay pod v4")
		checkReachable(underlayPod.Name, underlayPod.Namespace, v4UPodIP, v4OPodIP, strconv.Itoa(curlListenPort), isEnableU2O)

		ginkgo.By("checking overlay pod access to underlay pod v4")
		checkReachable(overlayPod.Name, overlayPod.Namespace, v4OPodIP, v4UPodIP, strconv.Itoa(curlListenPort), isEnableU2O)
	}

	if v6UPodIP != "" && v6OPodIP != "" {
		ginkgo.By("checking underlay pod access to overlay pod v6")
		checkReachable(underlayPod.Name, underlayPod.Namespace, v6UPodIP, v6OPodIP, strconv.Itoa(curlListenPort), isEnableU2O)

		ginkgo.By("checking overlay pod access to underlay pod v6")
		checkReachable(overlayPod.Name, overlayPod.Namespace, v6OPodIP, v6UPodIP, strconv.Itoa(curlListenPort), isEnableU2O)
	}
}

// checkReachable curls http://targetIP:targetPort/clientip from inside the given
// pod and asserts that the observed reachability matches expectReachable. When
// the target is expected to be reachable, it additionally verifies that the
// client address reported by agnhost equals sourceIP, i.e. the packet was not
// SNATed on the way.
func checkReachable(podName, podNamespace, sourceIP, targetIP, targetPort string, expectReachable bool) {
	ginkgo.By("checking curl reachable")
	target := net.JoinHostPort(targetIP, targetPort)
	curlCmd := fmt.Sprintf("kubectl exec %s -n %s -- curl -q -s --connect-timeout 5 %s/clientip", podName, podNamespace, target)
	// Best-effort execution: when the target is unreachable curl exits non-zero,
	// which is detected below from kubectl's output text, not the error value.
	out, _ := exec.Command("bash", "-c", curlCmd).CombinedOutput()
	result := string(out)

	if !expectReachable {
		// kubectl exec reports a failed curl as "command terminated with exit code N".
		reachable := !strings.Contains(result, "terminated with exit code")
		framework.ExpectEqual(reachable, expectReachable)
		return
	}

	// agnhost's /clientip endpoint echoes the caller as "<ip>:<port>".
	clientIP, _, err := net.SplitHostPort(strings.TrimSpace(result))
	framework.ExpectNoError(err)
	// check packet has not SNAT
	framework.ExpectEqual(sourceIP, clientIP)
}

// checkPolicy asserts whether the logical router policy described by
// hitPolicyStr is present (expectPolicyExist) in the ovn-cluster router's
// policy list, as reported by `kubectl ko nbctl lr-policy-list`. Each output
// line has its whitespace runs collapsed to single spaces before matching, so
// nbctl's column alignment does not affect the comparison.
func checkPolicy(hitPolicyStr string, expectPolicyExist bool) {
	output, err := exec.Command("bash", "-c", "kubectl ko nbctl lr-policy-list ovn-cluster").CombinedOutput()
	// Fail loudly if the listing command itself errors: previously the error was
	// discarded, so a broken kubectl/ko plugin left policyExist == false and the
	// "policy must not exist" assertion passed vacuously.
	framework.ExpectNoError(err, "listing ovn-cluster router policies: %s", string(output))

	policyExist := false
	for _, line := range strings.Split(string(output), "\n") {
		if strings.Contains(strings.Join(strings.Fields(line), " "), hitPolicyStr) {
			policyExist = true
			break
		}
	}
	framework.ExpectEqual(policyExist, expectPolicyExist)
}

0 comments on commit 67024ec

Please sign in to comment.