
Commit

Merge pull request #103409 from andrewsykim/service-internal-traffic-policy-e2e

test/e2e/network: add test for Service internalTrafficPolicy
k8s-ci-robot committed Jul 7, 2021
2 parents 17f6f28 + 04d59ff commit b289fbb
Showing 2 changed files with 349 additions and 0 deletions.
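For context: with internalTrafficPolicy: Local, kube-proxy routes cluster-internal traffic for a Service only to endpoints on the same node as the client; on nodes with no local endpoint, connections fail (the tests below expect a timeout). Below is a minimal sketch of the Service shape these tests create, using the same k8s.io/api/core/v1 types as the diff; the package name, helper name, and selector are illustrative, not part of this change:

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// newLocalTrafficService returns a ClusterIP Service whose cluster-internal
// traffic is restricted to endpoints on the client's own node.
func newLocalTrafficService(ns string) *v1.Service {
	local := v1.ServiceInternalTrafficPolicyLocal
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc-itp", Namespace: ns},
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeClusterIP,
			Selector: map[string]string{"app": "echo-hostname"}, // illustrative selector
			Ports: []v1.ServicePort{
				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
			},
			InternalTrafficPolicy: &local,
		},
	}
}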
test/e2e/network/service.go (320 additions, 0 deletions)
@@ -2030,6 +2030,326 @@ var _ = common.SIGDescribe("Services", func() {
		}
	})

ginkgo.It("should respect internalTrafficPolicy=Local Pod to Pod [Feature:ServiceInternalTrafficPolicy]", func() {
// windows kube-proxy does not support this feature yet
// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
e2eskipper.SkipIfNodeOSDistroIs("windows")

// This behavior is not supported if Kube-proxy is in "userspace" mode.
// So we check the kube-proxy mode and skip this test if that's the case.
if proxyMode, err := proxyMode(f); err == nil {
if proxyMode == "userspace" {
e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
}
} else {
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
}

nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
nodeCounts := len(nodes.Items)
if nodeCounts < 2 {
e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
}
node0 := nodes.Items[0]
node1 := nodes.Items[1]

serviceName := "svc-itp"
ns := f.Namespace.Name
servicePort := 80

ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
local := v1.ServiceInternalTrafficPolicyLocal
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
}
svc.Spec.InternalTrafficPolicy = &local
})
framework.ExpectNoError(err)

ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
webserverPod0.Labels = jig.Labels
e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))

validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
for i := 0; i < 5; i++ {
// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)

// the second pause pod is on a different node, so it should see a connection error every time
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
framework.ExpectError(err, "expected error when trying to connect to cluster IP")
}
})

ginkgo.It("should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod [Feature:ServiceInternalTrafficPolicy]", func() {
// windows kube-proxy does not support this feature yet
// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
e2eskipper.SkipIfNodeOSDistroIs("windows")

// This behavior is not supported if Kube-proxy is in "userspace" mode.
// So we check the kube-proxy mode and skip this test if that's the case.
if proxyMode, err := proxyMode(f); err == nil {
if proxyMode == "userspace" {
e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
}
} else {
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
}

nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
nodeCounts := len(nodes.Items)
if nodeCounts < 2 {
e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
}
node0 := nodes.Items[0]
node1 := nodes.Items[1]

serviceName := "svc-itp"
ns := f.Namespace.Name
servicePort := 80

ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
local := v1.ServiceInternalTrafficPolicyLocal
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
}
svc.Spec.InternalTrafficPolicy = &local
})
framework.ExpectNoError(err)

ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
webserverPod0.Labels = jig.Labels
e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))

validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
pausePod0.Spec.HostNetwork = true
e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
pausePod1.Spec.HostNetwork = true
e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
for i := 0; i < 5; i++ {
// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)

// the second pause pod is on a different node, so it should see a connection error every time
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
framework.ExpectError(err, "expected error when trying to connect to cluster IP")
}
})

ginkgo.It("should respect internalTrafficPolicy=Local Pod to Pod (hostNetwork: true) [Feature:ServiceInternalTrafficPolicy]", func() {
// windows kube-proxy does not support this feature yet
// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
e2eskipper.SkipIfNodeOSDistroIs("windows")

// This behavior is not supported if Kube-proxy is in "userspace" mode.
// So we check the kube-proxy mode and skip this test if that's the case.
if proxyMode, err := proxyMode(f); err == nil {
if proxyMode == "userspace" {
e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
}
} else {
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
}

nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
nodeCounts := len(nodes.Items)
if nodeCounts < 2 {
e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
}
node0 := nodes.Items[0]
node1 := nodes.Items[1]

serviceName := "svc-itp"
ns := f.Namespace.Name
servicePort := 80

ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
local := v1.ServiceInternalTrafficPolicyLocal
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
}
svc.Spec.InternalTrafficPolicy = &local
})
framework.ExpectNoError(err)

ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
webserverPod0.Labels = jig.Labels
webserverPod0.Spec.HostNetwork = true
e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))

validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
for i := 0; i < 5; i++ {
// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
// note that the expected hostname is the node name because the backend pod is on host network
execHostnameTest(*pausePod0, serviceAddress, node0.Name)

// the second pause pod is on a different node, so it should see a connection error every time
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
framework.ExpectError(err, "expected error when trying to connect to cluster IP")
}
})

ginkgo.It("should respect internalTrafficPolicy=Local Pod (hostNetwork: true) to Pod (hostNetwork: true) [Feature:ServiceInternalTrafficPolicy]", func() {
// windows kube-proxy does not support this feature yet
// TODO: remove this skip when windows-based proxies implement internalTrafficPolicy
e2eskipper.SkipIfNodeOSDistroIs("windows")

// This behavior is not supported if Kube-proxy is in "userspace" mode.
// So we check the kube-proxy mode and skip this test if that's the case.
if proxyMode, err := proxyMode(f); err == nil {
if proxyMode == "userspace" {
e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
}
} else {
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
}

nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
framework.ExpectNoError(err)
nodeCounts := len(nodes.Items)
if nodeCounts < 2 {
e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
}
node0 := nodes.Items[0]
node1 := nodes.Items[1]

serviceName := "svc-itp"
ns := f.Namespace.Name
servicePort := 80

ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP and internalTrafficPolicy=Local in namespace " + ns)
local := v1.ServiceInternalTrafficPolicyLocal
jig := e2eservice.NewTestJig(cs, ns, serviceName)
svc, err := jig.CreateTCPService(func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
}
svc.Spec.InternalTrafficPolicy = &local
})
framework.ExpectNoError(err)

ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort))
webserverPod0.Labels = jig.Labels
webserverPod0.Spec.HostNetwork = true
e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

_, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))

validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
pausePod0.Spec.HostNetwork = true
e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
pausePod1.Spec.HostNetwork = true
e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

// assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
for i := 0; i < 5; i++ {
// the first pause pod should be on the same node as the webserver, so it can connect to the local pod using clusterIP
// note that the expected hostname is the node name because the backend pod is on host network
execHostnameTest(*pausePod0, serviceAddress, node0.Name)

// the second pause pod is on a different node, so it should see a connection error every time
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
_, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
framework.ExpectError(err, "expected error when trying to connect to cluster IP")
}
})

	/*
		Release: v1.18
		Testname: Find Kubernetes Service in default Namespace
test/e2e/network/util.go (29 additions, 0 deletions)
@@ -141,6 +141,35 @@ func execSourceIPTest(sourcePod v1.Pod, targetAddr string) (string, string) {
	return sourcePod.Status.PodIP, host
}

// execHostnameTest runs curl against the "/hostname" endpoint on the target address
// from the given Pod and checks that the reported hostname matches the expected one.
func execHostnameTest(sourcePod v1.Pod, targetAddr, targetHostname string) {
	var (
		err     error
		stdout  string
		timeout = 2 * time.Minute
	)

	framework.Logf("Waiting up to %v to get response from %s", timeout, targetAddr)
	cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/hostname`, targetAddr)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
		stdout, err = framework.RunHostCmd(sourcePod.Namespace, sourcePod.Name, cmd)
		if err != nil {
			framework.Logf("got err: %v, retry until timeout", err)
			continue
		}
		// Check the output as well, since it may be empty in case of an error.
		if strings.TrimSpace(stdout) == "" {
			framework.Logf("got empty stdout, retry until timeout")
			continue
		}
		break
	}

	framework.ExpectNoError(err)
	framework.ExpectEqual(strings.TrimSpace(stdout), targetHostname)
}
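For reference, the expected hostname passed to this helper differs by backend type in the tests above: a pod-network backend reports its pod name, while a hostNetwork backend reports its node name. An illustrative pair of calls, reusing the variable names from the service.go diff:

// Client pod on the same node as the internalTrafficPolicy=Local endpoint:
// the request succeeds and returns the backend's hostname.
execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name) // backend on the pod network
execHostnameTest(*pausePod0, serviceAddress, node0.Name)         // backend with hostNetwork: true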

// createSecondNodePortService creates a service with the same selector as config.NodePortService and same HTTP Port
func createSecondNodePortService(f *framework.Framework, config *e2enetwork.NetworkingTestConfig) (*v1.Service, int) {
	svc := &v1.Service{
