Add tests which perform http calls from pods to the VM
rmohr committed Oct 18, 2017
1 parent fa9c7b5 commit 2037dbd
Showing 6 changed files with 130 additions and 48 deletions.
2 changes: 1 addition & 1 deletion automation/test.sh
@@ -117,4 +117,4 @@ kubectl get pods
kubectl version

# Run functional tests
FUNC_TEST_ARGS="--ginkgo.noColor" make functest
FUNC_TEST_ARGS="--ginkgo.noColor" make functest DOCKER_TAG=devel
8 changes: 4 additions & 4 deletions glide.lock

Generated file; diff not rendered by default.

2 changes: 1 addition & 1 deletion images/vm-killer/Dockerfile
@@ -21,5 +21,5 @@ FROM fedora:26
MAINTAINER "The KubeVirt Project" <kubevirt-dev@googlegroups.com>
ENV container docker

RUN dnf -y install procps-ng \
RUN dnf -y install procps-ng nmap-ncat iproute \
&& dnf -y clean all
46 changes: 46 additions & 0 deletions tests/utils.go
@@ -44,6 +44,8 @@ import (
"github.com/google/goexpect"
"k8s.io/client-go/rest"

"os"

"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/pkg/virtctl/console"
@@ -748,6 +750,10 @@ func newString(x string) *string {
return &x
}

func NewBool(x bool) *bool {
return &x
}

func NewRandomReplicaSetFromVM(vm *v1.VirtualMachine, replicas int32) *v1.VirtualMachineReplicaSet {
name := "replicaset" + rand.String(5)
rs := &v1.VirtualMachineReplicaSet{
@@ -792,3 +798,43 @@ func NewConsoleExpecter(config *rest.Config, vm *v1.VirtualMachine, consoleName
Check: func() bool { return true },
}, timeout, opts...)
}

func RenderJob(name string, dockerTag string, cmd []string, args []string) *k8sv1.Pod {
job := k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: name,
Labels: map[string]string{
v1.AppLabel: "test",
},
},
Spec: k8sv1.PodSpec{
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: []k8sv1.Container{
{
Name: name,
Image: "kubevirt/vm-killer:" + dockerTag,
Command: cmd,
Args: args,
SecurityContext: &k8sv1.SecurityContext{
Privileged: NewBool(true),
RunAsUser: new(int64),
},
},
},
HostPID: true,
SecurityContext: &k8sv1.PodSecurityContext{
RunAsUser: new(int64),
},
},
}

return &job
}

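// GetDockerTag resolves the image tag used for test helper pods. The
// functest Makefile target (invoked with DOCKER_TAG in automation/test.sh
// above) is expected to export it as the lowercase docker_tag environment
// variable; "devel" is the fallback when it is unset.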
func GetDockerTag() string {
dockerTag := os.Getenv("docker_tag")
if dockerTag == "" {
dockerTag = "devel"
}
return dockerTag
}
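For illustration only, a minimal sketch (not part of the commit) of how the new RenderJob and GetDockerTag helpers combine to launch a one-shot helper pod from a test. The function name createProbePod and the probe command are hypothetical; the kubecli client is assumed to be configured as in the tests below.

// Sketch: run a one-shot command in a privileged helper pod built from the
// vm-killer image, tagged with the tag the functional tests were built with.
func createProbePod(virtClient kubecli.KubevirtClient) (*k8sv1.Pod, error) {
	job := tests.RenderJob("probe", tests.GetDockerTag(),
		[]string{"/bin/bash", "-c"}, []string{"echo probing"})
	return virtClient.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
}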
69 changes: 67 additions & 2 deletions tests/vm_networking_test.go
@@ -28,6 +28,13 @@ import (

"github.com/google/goexpect"

"fmt"

v12 "k8s.io/api/core/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/onsi/ginkgo/extensions/table"

"kubevirt.io/kubevirt/pkg/api/v1"
"kubevirt.io/kubevirt/pkg/kubecli"
"kubevirt.io/kubevirt/tests"
@@ -48,8 +55,10 @@ var _ = Describe("Networking", func() {

Context("VirtualMachine With nodeNetwork definition given", func() {

It("should connect via DHCP to the node network", func() {
vm, err := tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", "noCloud", "#!/bin/bash\necho 'hello'\n")
var vm *v1.VirtualMachine

BeforeEach(func() {
vm, err = tests.NewRandomVMWithEphemeralDiskAndUserdata("kubevirt/cirros-registry-disk-demo:devel", "noCloud", "#!/bin/bash\necho 'hello'\n")
Expect(err).ToNot(HaveOccurred())

// add node network
@@ -60,6 +69,12 @@ var _ = Describe("Networking", func() {
Expect(err).ToNot(HaveOccurred())
tests.WaitForSuccessfulVMStart(vm)

// Fetch the new VM with updated status
vm, err = virtClient.VM(tests.NamespaceTestDefault).Get(vm.ObjectMeta.Name, v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
})

It("should be able to reach the internet", func() {
// Wait until the VM is booted, ping google and check if we can reach the internet
expecter, _, err := tests.NewConsoleExpecter(virtConfig, vm, "", 10*time.Second)
defer expecter.Close()
@@ -78,6 +93,56 @@ var _ = Describe("Networking", func() {
}, 90*time.Second)
Expect(err).ToNot(HaveOccurred())
}, 120)

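// The table runs the same netcat probe twice: once with the probe pod pinned
// to the VM's node (NodeSelectorOpIn) and once pinned to any other node
// (NodeSelectorOpNotIn), so the propagated IP is checked for same-node and
// cross-node reachability.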
table.DescribeTable("should be reachable via the propagated IP from a Pod", func(op v12.NodeSelectorOperator) {

ip := vm.Status.Interfaces[0].IP

// Run netcat and give it one second to get "Hello World!" back from the VM
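// The bash one-liner connects to the VM on TCP port 1500, reads the reply
// from the VM's nc server and exits with the result of comparing it to
// "Hello World!", so the pod's exit status reflects connectivity.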
check := []string{fmt.Sprintf("while read x; do test \"$x\" = \"Hello World!\"; exit $?; done < <(nc %s 1500 -i 1 -w 1)", ip)}
job := tests.RenderJob("netcat", tests.GetDockerTag(), []string{"/bin/bash", "-c"}, check)
job.Spec.Affinity = &v12.Affinity{
NodeAffinity: &v12.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v12.NodeSelector{
NodeSelectorTerms: []v12.NodeSelectorTerm{
{
MatchExpressions: []v12.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: op, Values: []string{vm.Status.NodeName}},
},
},
},
},
},
}

// Wait until the VM is booted, then start a minimalistic netcat server inside it
expecter, _, err := tests.NewConsoleExpecter(virtConfig, vm, "", 10*time.Second)
defer expecter.Close()
Expect(err).ToNot(HaveOccurred())
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "\n"},
&expect.BExp{R: "cirros login: "},
&expect.BSnd{S: "cirros\n"},
&expect.BExp{R: "Password: "},
&expect.BSnd{S: "cubswin:)\n"},
&expect.BExp{R: "\\$ "},
&expect.BSnd{S: "nc -klp 1500 -e echo -e \"Hello World!\"\n"},
}, 90*time.Second)
Expect(err).ToNot(HaveOccurred())

job, err = virtClient.CoreV1().Pods(vm.ObjectMeta.Namespace).Create(job)
Expect(err).ToNot(HaveOccurred())

Eventually(func() v12.PodPhase {
j, err := virtClient.Core().Pods(vm.ObjectMeta.Namespace).Get(job.ObjectMeta.Name, v13.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(j.Status.Phase).ToNot(Equal(v12.PodFailed))
return j.Status.Phase
}, 30*time.Second, 1*time.Second).Should(Equal(v12.PodSucceeded))

},
table.Entry("on the same node", v12.NodeSelectorOpIn),
table.Entry("on a different node", v12.NodeSelectorOpNotIn))
})

})
51 changes: 11 additions & 40 deletions tests/vmlifecycle_test.go
@@ -43,10 +43,6 @@ var _ = Describe("Vmlifecycle", func() {
if primaryNodeName == "" {
primaryNodeName = "master"
}
dockerTag := os.Getenv("docker_tag")
if dockerTag == "" {
dockerTag = "latest"
}

flag.Parse()

@@ -147,7 +143,7 @@ var _ = Describe("Vmlifecycle", func() {
Expect(err).ToNot(HaveOccurred())

time.Sleep(10 * time.Second)
err = pkillAllVms(virtClient, nodeName, dockerTag)
err = pkillAllVms(virtClient, nodeName)
Expect(err).To(BeNil())

tests.NewObjectEventWatcher(obj).SinceWatchedObjectResourceVersion().WaitFor(tests.WarningEvent, v1.Stopped)
@@ -170,7 +166,7 @@ var _ = Describe("Vmlifecycle", func() {
Expect(ok).To(BeTrue(), "Object is not of type *v1.VM")
Expect(err).ToNot(HaveOccurred())

err = pkillAllVms(virtClient, nodeName, dockerTag)
err = pkillAllVms(virtClient, nodeName)
Expect(err).To(BeNil())

// Wait for stop event of the VM
@@ -237,43 +233,18 @@ var _ = Describe("Vmlifecycle", func() {
})
})

func renderPkillAllVmsJob(dockerTag string) *k8sv1.Pod {
job := k8sv1.Pod{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "vm-killer",
Labels: map[string]string{
v1.AppLabel: "test",
},
},
Spec: k8sv1.PodSpec{
RestartPolicy: k8sv1.RestartPolicyNever,
Containers: []k8sv1.Container{
{
Name: "vm-killer",
Image: "kubevirt/vm-killer:" + dockerTag,
Command: []string{
"pkill",
"-9",
"qemu",
},
SecurityContext: &k8sv1.SecurityContext{
Privileged: newBool(true),
RunAsUser: new(int64),
},
},
},
HostPID: true,
SecurityContext: &k8sv1.PodSecurityContext{
RunAsUser: new(int64),
},
},
}
func renderPkillAllVmsJob() *k8sv1.Pod {

return &job
return tests.RenderJob("vm-killer", tests.GetDockerTag(),
[]string{
"pkill",
"-9",
"qemu",
}, nil)
}

func pkillAllVms(virtCli kubecli.KubevirtClient, node, dockerTag string) error {
job := renderPkillAllVmsJob(dockerTag)
func pkillAllVms(virtCli kubecli.KubevirtClient, node string) error {
job := renderPkillAllVmsJob()
job.Spec.NodeName = node
_, err := virtCli.CoreV1().Pods(tests.NamespaceTestDefault).Create(job)
