test: Run curl from k8s3 instead of client-from-outside container
Previously, we ran curl from the "client-from-outside" container
in the tests which required sending requests from a third host.

We simulated the third host by running a container
("client-from-outside") in a Docker network which was not managed
by Cilium.

Unfortunately, requests sent to a NodePort service from the container
were handled by bpf_sock.c, which prevented us from testing the NodePort
implementation in bpf_netdev.c.

Fix this by introducing a "real" third host and running curl from it.

Signed-off-by: Martynas Pumputis <m@lambda.lt>
brb authored and aanm committed Jan 17, 2020
1 parent d3119f7 commit 230b805
Showing 5 changed files with 58 additions and 33 deletions.
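In essence, the change swaps the Docker-based client for curl executed in the host network namespace of the k8s3 node. A condensed before/after sketch, using only helpers that appear in the test/k8sT/Services.go diff below (the construction of cmd is elided here, as it is in the diff):

// Before: curl ran inside the "client-from-outside" container on the k8s1 VM.
// Requests from that container are handled by bpf_sock.c (socket-level load
// balancing), so the NodePort code in bpf_netdev.c was never exercised.
ssh := helpers.GetVagrantSSHMeta(helpers.K8s1VMName())
res := ssh.ContainerExec("client-from-outside", cmd)

// After: curl runs in the host netns of the third node (k8s3), so the request
// arrives from a real external host and hits the NodePort path in bpf_netdev.c.
// k8s3IP is later compared against the client_address= reported by the server.
k8s3Name, k8s3IP := getNodeInfo(helpers.K8s3)
res, err := kubectl.ExecInHostNetNS(context.TODO(), k8s3Name, cmd)
Expect(err).Should(BeNil(), "Cannot exec in k8s3 host netns")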
11 changes: 11 additions & 0 deletions test/ginkgo-ext/scopes.go
@@ -558,3 +558,14 @@ func SkipContextIf(condition func() bool, text string, body func()) bool {

return Context(text, body)
}

// SkipItIf executes the given body if the given condition is NOT met.
func SkipItIf(condition func() bool, text string, body func()) bool {
    if condition() {
        return It(text, func() {
            Skip("skipping due to unmet condition")
        })
    }

    return It(text, body)
}
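For reference, the new helper is consumed later in this same commit to gate a test on the number of provisioned VMs, roughly as follows (DoesNotHaveHosts is added in test/helpers/utils.go below):

SkipItIf(helpers.DoesNotHaveHosts(3), "Tests NodePort (kube-proxy) with externalTrafficPolicy=Local", func() {
    testExternalTrafficPolicyLocal()
})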
13 changes: 13 additions & 0 deletions test/helpers/utils.go
@@ -24,6 +24,7 @@ import (
"math/rand"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
@@ -457,6 +458,18 @@ func DoesNotRunOnNetNext() bool {
return !RunsOnNetNext()
}

// DoesNotHaveHosts returns a function which returns true if a CI job
// has fewer VMs than the given count.
func DoesNotHaveHosts(count int) func() bool {
    return func() bool {
        if c, err := strconv.Atoi(os.Getenv("K8S_NODES")); err != nil {
            return true
        } else {
            return c < count
        }
    }
}

// CiliumDevImage returns cilium docker image name based on cilium.registry option and const CiliumDevImage
func CiliumDevImage() string {
return fmt.Sprintf(ciliumDeveloperImage, config.CiliumTestConfig.Registry)
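The helper reads the K8S_NODES environment variable, which is assumed here to hold the number of VMs provisioned for the CI job; a small behavioural sketch (values are illustrative):

// K8S_NODES unset, non-numeric, or below the requested count => true (skip).
os.Setenv("K8S_NODES", "2")
fmt.Println(helpers.DoesNotHaveHosts(3)()) // true: only 2 VMs, 3-node tests are skipped
os.Setenv("K8S_NODES", "3")
fmt.Println(helpers.DoesNotHaveHosts(3)()) // false: enough VMs, the test runs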
41 changes: 23 additions & 18 deletions test/k8sT/Services.go
@@ -245,10 +245,9 @@ var _ = Describe("K8sServicesTest", func() {
}
}

doRequestsFromOutsideClientWithLocalPort :=
doRequestsFromThirdHostWithLocalPort :=
func(url string, count int, checkSourceIP bool, fromPort int) {
var cmd string
ssh := helpers.GetVagrantSSHMeta(helpers.K8s1VMName())
By("Making %d HTTP requests from outside cluster to %q", count, url)
for i := 1; i <= count; i++ {
if fromPort == 0 {
@@ -259,16 +258,18 @@ var _ = Describe("K8sServicesTest", func() {
if checkSourceIP {
cmd += " | grep client_address="
}
res := ssh.ContainerExec("client-from-outside", cmd)
k8s3Name, k8s3IP := getNodeInfo(helpers.K8s3)
res, err := kubectl.ExecInHostNetNS(context.TODO(), k8s3Name, cmd)
Expect(err).Should(BeNil(), "Cannot exec in k8s3 host netns")
ExpectWithOffset(1, res).Should(helpers.CMDSuccess(),
"Can not connect to service %q from outside cluster", url)
if checkSourceIP {
Expect(strings.TrimSpace(strings.Split(res.GetStdOut(), "=")[1])).To(Equal("192.168.10.10"))
Expect(strings.TrimSpace(strings.Split(res.GetStdOut(), "=")[1])).To(Equal(k8s3IP))
}
}
}
doRequestsFromOutsideClient := func(url string, count int, checkSourceIP bool) {
doRequestsFromOutsideClientWithLocalPort(url, count, checkSourceIP, 0)
doRequestsFromThirdHost := func(url string, count int, checkSourceIP bool) {
doRequestsFromThirdHostWithLocalPort(url, count, checkSourceIP, 0)
}

testNodePort := func(bpfNodePort bool) {
@@ -339,7 +340,7 @@ var _ = Describe("K8sServicesTest", func() {

count := 10
url := getURL(k8s1IP, data.Spec.Ports[0].NodePort)
doRequestsFromOutsideClient(url, count, true)
doRequestsFromThirdHost(url, count, true)

// Checks that requests to k8s2 succeed, while requests to k8s1 are dropped
err = kubectl.Get(helpers.DefaultNamespace, "service test-nodeport-local-k8s2").Unmarshal(&data)
@@ -358,7 +359,7 @@ var _ = Describe("K8sServicesTest", func() {
testNodePort(false)
})

It("Tests NodePort (kube-proxy) with externalTrafficPolicy=Local", func() {
SkipItIf(helpers.DoesNotHaveHosts(3), "Tests NodePort (kube-proxy) with externalTrafficPolicy=Local", func() {
testExternalTrafficPolicyLocal()
})

@@ -383,7 +384,7 @@ var _ = Describe("K8sServicesTest", func() {
})
})

SkipContextIf(helpers.DoesNotRunOnNetNext, "Tests NodePort BPF", func() {
SkipContextIf(func() bool { return helpers.DoesNotRunOnNetNext() || helpers.DoesNotHaveHosts(3)() }, "Tests NodePort BPF", func() {
// TODO(brb) Add with L7 policy test cases after GH#8971 has been fixed

nativeDev := "enp0s8"
@@ -469,7 +470,7 @@ var _ = Describe("K8sServicesTest", func() {

k8s1Name, _ := getNodeInfo(helpers.K8s1)
k8s2Name, _ := getNodeInfo(helpers.K8s2)
doRequestsFromOutsideClient("http://"+lbIP, 10, false)
doRequestsFromThirdHost("http://"+lbIP, 10, false)
doRequests("http://"+lbIP, 10, k8s1Name)
doRequests("http://"+lbIP, 10, k8s2Name)
})
@@ -489,8 +490,9 @@ var _ = Describe("K8sServicesTest", func() {
var data v1.Service
err := kubectl.Get(helpers.DefaultNamespace, "service test-nodeport").Unmarshal(&data)
Expect(err).Should(BeNil(), "Cannot retrieve service")
url := getURL(helpers.K8s1Ip, data.Spec.Ports[0].NodePort)
doRequestsFromOutsideClient(url, 10, true)
_, k8s1IP := getNodeInfo(helpers.K8s1)
url := getURL(k8s1IP, data.Spec.Ports[0].NodePort)
doRequestsFromThirdHost(url, 10, true)

// Test whether DSR NAT entries are evicted by GC

@@ -500,17 +502,20 @@ var _ = Describe("K8sServicesTest", func() {
// client -> k8s1 -> endpoint @ k8s2.
err = kubectl.Get(helpers.DefaultNamespace, "service test-nodeport-k8s2").Unmarshal(&data)
Expect(err).Should(BeNil(), "Cannot retrieve service")
url = getURL(helpers.K8s1Ip, data.Spec.Ports[0].NodePort)
url = getURL(k8s1IP, data.Spec.Ports[0].NodePort)

doRequestsFromOutsideClientWithLocalPort(url, 1, true, 64000)
doRequestsFromThirdHostWithLocalPort(url, 1, true, 64000)
res := kubectl.CiliumExec(pod, "cilium bpf nat list | grep 64000")
Expect(res.GetStdOut()).ShouldNot(BeEmpty(), "NAT entry was not evicted")
res.ExpectSuccess("Unable to list NAT entries")
// TODO(brb) Uncomment all "res.ExpectSuccess()" after adding
// IPv6 DSR support (cilium bpf {ct,nat} cmds exit with 1
// due to missing ipv6 maps).
// res.ExpectSuccess("Unable to list NAT entries")
// Flush CT maps to trigger eviction of the NAT entries (simulates CT GC)
res = kubectl.CiliumExec(pod, "cilium bpf ct flush global")
res.ExpectSuccess("Unable to flush CT maps")
kubectl.CiliumExec(pod, "cilium bpf ct flush global")
// res.ExpectSuccess("Unable to flush CT maps")
res = kubectl.CiliumExec(pod, "cilium bpf nat list | grep 64000")
res.ExpectSuccess("Unable to list NAT entries")
//res.ExpectSuccess("Unable to list NAT entries")
Expect(res.GetStdOut()).Should(BeEmpty(), "NAT entry was not evicted")
})
})
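Condensed, the DSR NAT-entry eviction check above amounts to the following sequence (a sketch assembled from the new lines of this hunk; pod and url are set up in the folded part of the test):

// One request from the third host with a fixed source port, so the resulting
// NAT entry is easy to grep for.
doRequestsFromThirdHostWithLocalPort(url, 1, true, 64000)

// The NAT entry for source port 64000 should now exist.
res := kubectl.CiliumExec(pod, "cilium bpf nat list | grep 64000")
Expect(res.GetStdOut()).ShouldNot(BeEmpty(), "NAT entry was not created")

// Flushing the CT maps simulates CT garbage collection ...
kubectl.CiliumExec(pod, "cilium bpf ct flush global")

// ... which should also evict the corresponding NAT entry.
res = kubectl.CiliumExec(pod, "cilium bpf nat list | grep 64000")
Expect(res.GetStdOut()).Should(BeEmpty(), "NAT entry was not evicted")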
11 changes: 11 additions & 0 deletions test/k8sT/manifests/demo_ds.yaml
@@ -13,6 +13,17 @@ spec:
      labels:
        zgroup: testDS
    spec:
      # Some tests (e.g. DSR) require sending requests to testds from a host
      # which does not run any of its pods. We've chosen k8s3 to be such a host.
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: "cilium.io/ci-node"
                operator: NotIn
                values:
                - k8s3
      containers:
      - name: web
        image: docker.io/cilium/echoserver:1.10
15 changes: 0 additions & 15 deletions test/provision/k8s_install.sh
@@ -342,19 +342,4 @@ docker network create --subnet=192.168.9.0/24 outside
docker run --net outside --ip 192.168.9.10 --restart=always -d docker.io/cilium/demo-httpd:latest
docker run --net outside --ip 192.168.9.11 --restart=always -d docker.io/cilium/demo-httpd:latest

if [[ "${HOST}" == "k8s1" ]]; then
# To avoid SNAT'ing source IP, we create a network with masquerading disabled.
# Also, we install a route on each other node to make it possible a replies from
# remote nodes to reach containers attached to the network.
docker network create --subnet=192.168.10.0/24 \
--opt 'com.docker.network.bridge.enable_ip_masquerade=false' \
outside-no-masq
# NOTE: when changing "client-from-outside" IP addr, make sure that the IP addr
# is changed in the tests (grep for the IP addr).
docker run --name client-from-outside --net outside-no-masq --ip 192.168.10.10 \
--restart=always -d docker.io/cilium/demo-client:latest
else
sudo ip route add 192.168.10.0/24 via 192.168.36.11 || true
fi

sudo touch /etc/provision_finished
