Poll nodeip:node-port till kube-proxy updates iptables. #20238

Merged (1 commit, Jan 28, 2016)
test/e2e/service.go: 44 changes (36 additions, 8 deletions)
@@ -40,6 +40,10 @@ import (
"k8s.io/kubernetes/pkg/util/wait"
)

+// Maximum time a kube-proxy daemon on a node is allowed to not
+// notice a Service update, such as type=NodePort.
+const kubeProxyLagTimeout = 45 * time.Second
+
// This should match whatever the default/configured range is
var ServiceNodePortRange = utilnet.PortRange{Base: 30000, Size: 2768}

@@ -394,12 +398,17 @@ var _ = Describe("Services", func() {

By("hitting the pod through the service's NodePort")
ip := pickNodeIP(c)
-testReachable(ip, nodePort)
+// Loop for kubeProxyLagTimeout, because different kube-proxies might take
+// different times to notice the new Service and open up the node port.
+if err := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { return testReachable(ip, nodePort) }); err != nil {
+	Failf("Could not reach nodePort service through node-ip %v:%v in %v", ip, nodePort, kubeProxyLagTimeout)
+}

By("verifying the node port is locked")
hostExec := LaunchHostExecPod(f.Client, f.Namespace.Name, "hostexec")
-// Loop a bit because we see transient flakes.
-cmd := fmt.Sprintf(`for i in $(seq 1 10); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 0.1; done; exit 1`, nodePort)
+// Even if the node-ip:node-port check above passed, this hostexec pod
+// might land on a node with a laggy kube-proxy.
+cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort)
stdout, err := RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
Failf("expected node port (%d) to be in use, stdout: %v", nodePort, stdout)
@@ -470,7 +479,12 @@ var _ = Describe("Services", func() {
By("hitting the pod through the service's NodePort")
ip := pickNodeIP(f.Client)
nodePort1 := port.NodePort // Save for later!
-testReachable(ip, nodePort1)
+
+// Loop for kubeProxyLagTimeout, because different kube-proxies might take
+// different times to notice the new Service and open up the node port.
+if err := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { return testReachable(ip, nodePort1) }); err != nil {
+	Failf("Could not reach nodePort service through node-ip %v:%v in %v", ip, nodePort1, kubeProxyLagTimeout)
+}

By("changing service " + serviceName + " to type=LoadBalancer")
service, err = updateService(f.Client, f.Namespace.Name, serviceName, func(s *api.Service) {
@@ -502,7 +516,13 @@ var _ = Describe("Services", func() {

By("hitting the pod through the service's NodePort")
ip = pickNodeIP(f.Client)
-testReachable(ip, nodePort1)
+
+// Loop for kubeProxyLagTimeout, because different kube-proxies might take
+// different times to notice the new Service and open up the node port.
+if err := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { return testReachable(ip, nodePort1) }); err != nil {
+	Failf("Could not reach nodePort service through node-ip %v:%v in %v", ip, nodePort1, kubeProxyLagTimeout)
+}
+
By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress1, 80)

@@ -539,7 +559,13 @@ var _ = Describe("Services", func() {
}

By("hitting the pod through the service's updated NodePort")
-testReachable(ip, nodePort2)
+
+// Loop for kubeProxyLagTimeout, because different kube-proxies might take
+// different times to notice the new Service and open up the node port.
+if err := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { return testReachable(ip, nodePort2) }); err != nil {
+	Failf("Could not reach nodePort service through node-ip %v:%v in %v", ip, nodePort2, kubeProxyLagTimeout)
+}
+
By("checking the old NodePort is closed")
testNotReachable(ip, nodePort1)

@@ -870,8 +896,10 @@ var _ = Describe("Services", func() {

if svc1.Spec.Ports[0].Protocol == api.ProtocolTCP {
By("hitting the pod through the service's NodePort")
-testReachable(pickNodeIP(c), port.NodePort)
-
+ip := pickNodeIP(c)
+if err := wait.PollImmediate(poll, kubeProxyLagTimeout, func() (bool, error) { return testReachable(ip, port.NodePort) }); err != nil {
+	Failf("Could not reach nodePort service through node-ip %v:%v in %v", ip, port.NodePort, kubeProxyLagTimeout)
+}
By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, servicePort)
} else {
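The commit applies one pattern at five call sites: replace a single testReachable assertion with a poll bounded by kubeProxyLagTimeout, so a kube-proxy that is slow to program iptables makes the test wait rather than fail. For readers outside the e2e suite, here is a minimal stand-alone sketch of that pattern using only the Go standard library. It is an illustrative approximation, not code from the PR: pollImmediate is a hypothetical stand-in for the suite's wait.PollImmediate, reachable stands in for testReachable with a plain TCP dial, and the address values are placeholders.

package main

import (
	"fmt"
	"net"
	"time"
)

// pollImmediate runs condition once right away, then every interval,
// until it returns true, returns an error, or timeout elapses.
// It approximates wait.PollImmediate from the k8s e2e utilities.
func pollImmediate(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		ok, err := condition()
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out after %v", timeout)
		}
		time.Sleep(interval)
	}
}

// reachable stands in for testReachable: the node port counts as open
// once a TCP connection to node-ip:node-port succeeds.
func reachable(ip string, port int) (bool, error) {
	conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, fmt.Sprint(port)), 2*time.Second)
	if err != nil {
		return false, nil // not reachable yet; tell the poller to retry
	}
	conn.Close()
	return true, nil
}

func main() {
	const kubeProxyLagTimeout = 45 * time.Second // mirrors the constant added by the PR
	ip, nodePort := "10.0.0.1", 30080            // placeholder node IP and NodePort

	if err := pollImmediate(2*time.Second, kubeProxyLagTimeout, func() (bool, error) {
		return reachable(ip, nodePort)
	}); err != nil {
		fmt.Printf("could not reach %v:%v in %v: %v\n", ip, nodePort, kubeProxyLagTimeout, err)
		return
	}
	fmt.Printf("reached %v:%v\n", ip, nodePort)
}

The detail that matters is that a failed dial returns (false, nil) rather than an error: the condition reports "not yet" and the loop keeps retrying until the timeout expires, whereas returning the error would abort on the first refused connection, which is exactly the transient flake the commit removes. The hostexec ss loop in the diff makes the same trade in shell, retrying for up to 300 seconds because that pod may land on a different, laggier node than the one already verified.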