Skip to content

Commit

Permalink
Add e2e test for kube controller manager lease configmap deletion recovery
Browse files Browse the repository at this point in the history
  • Loading branch information
damemi committed Nov 13, 2019
1 parent ad89ca9 commit 9cd6ba9
Showing 1 changed file with 81 additions and 0 deletions.
81 changes: 81 additions & 0 deletions test/e2e/operator_test.go
Expand Up @@ -2,8 +2,12 @@ package e2e

import (
"testing"
"time"

v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"

"github.com/openshift/cluster-kube-controller-manager-operator/pkg/operator/operatorclient"
Expand All @@ -24,3 +28,80 @@ func TestOperatorNamespace(t *testing.T) {
t.Fatal(err)
}
}

func TestKCMRecovery(t *testing.T) {
	// TestKCMRecovery is an e2e test verifying that the kube controller manager
	// (KCM) can recover from having its leader-election lease configmap deleted.
	// See https://bugzilla.redhat.com/show_bug.cgi?id=1744984
	kubeConfig, err := test.NewClientConfigForTest()
	if err != nil {
		t.Fatal(err)
	}
	kubeClient, err := kubernetes.NewForConfig(kubeConfig)
	if err != nil {
		t.Fatal(err)
	}

	// Try to delete the kube controller manager's lease configmap in kube-system.
	err = kubeClient.CoreV1().ConfigMaps("kube-system").Delete("kube-controller-manager", &metav1.DeleteOptions{})
	if err != nil {
		t.Fatal(err)
	}

	// Get all the currently running KCM pods, then delete them so the restarted
	// pods must re-acquire the (now missing) lease.
	pods, err := kubeClient.CoreV1().Pods("openshift-kube-controller-manager").List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	for _, pod := range pods.Items {
		err := kubeClient.CoreV1().Pods("openshift-kube-controller-manager").Delete(pod.Name, &metav1.DeleteOptions{})
		if err != nil {
			t.Fatal(err)
		}
	}

	// Now try to create a replicaset and see that it is successfully reconciled
	// by the controller manager (pods spawned), which proves KCM recovered.
	one := int64(1)
	replicas := int32(1)
	replicaSet := &v1.ReplicaSet{
		ObjectMeta: metav1.ObjectMeta{
			Name: "router",
		},
		Spec: v1.ReplicaSetSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "test"},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": "test"},
				},
				Spec: corev1.PodSpec{
					TerminationGracePeriodSeconds: &one,
					Containers: []corev1.Container{
						{
							Name:    "test",
							Image:   "busybox:latest",
							Command: []string{"/bin/true"},
						},
					},
				},
			},
		},
	}
	_, err = kubeClient.AppsV1().ReplicaSets(operatorclient.OperatorNamespace).Create(replicaSet)
	if err != nil {
		t.Fatal(err)
	}

	// Poll to see if pods from the replicaset are created, which would indicate
	// that KCM recovered.
	// Fix: the error returned by wait.Poll was previously discarded, so the test
	// passed even when the poll timed out (wait.ErrWaitTimeout) and KCM never
	// actually recovered. Fail the test explicitly on any poll error.
	err = wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {
		pods, err := kubeClient.CoreV1().Pods(operatorclient.OperatorNamespace).List(metav1.ListOptions{LabelSelector: "app=test"})
		if err != nil {
			return false, err
		}
		return len(pods.Items) > 0, nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

0 comments on commit 9cd6ba9

Please sign in to comment.