Skip to content

Commit

Permalink
Merge pull request #65 from lina-nikiforova/test_unreachable_host
Browse files Browse the repository at this point in the history
Bug 1797563: added TestUnreachableHost
  • Loading branch information
openshift-merge-robot committed Feb 3, 2020
2 parents 94cde7c + 6065fb9 commit 5e9ed4d
Show file tree
Hide file tree
Showing 4 changed files with 172 additions and 24 deletions.
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ test-unit:
.PHONY: test-unit

test-e2e:
go test ./test/integration $(TEST_OPTIONS)
go test ./test/integration -timeout 1h $(TEST_OPTIONS)
.PHONY: test-e2e

vendor:
Expand Down
42 changes: 34 additions & 8 deletions test/integration/basic_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,17 @@ package integration
import (
"encoding/json"
"testing"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)

// Check if opt-in/opt-out works
func TestOptOutOptIn(t *testing.T) {
// Backup pull secret from openshift-config namespace.
// oc extract secret/pull-secret -n openshift-config --to=.
pullSecret, err := kubeClient.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
pullSecret, err := clientset.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
Expand All @@ -38,28 +40,52 @@ func TestOptOutOptIn(t *testing.T) {

// Update the global cluster pull secret.
// oc set data secret/pull-secret -n openshift-config --from-file=.dockerconfigjson=<pull-secret-location>
_, err = kubeClient.CoreV1().Secrets("openshift-config").Update(newPullSecret)
_, err = clientset.CoreV1().Secrets("openshift-config").Update(newPullSecret)
if err != nil {
t.Fatal(err.Error())
}
// Check the logs - Logs contains the line "The operator is marked as disabled" and no reports are uploaded
RestartInsightsOperator(t)
CheckPodsLogs(t, kubeClient, "The operator is marked as disabled")
restartInsightsOperator(t)
checkPodsLogs(t, clientset, "The operator is marked as disabled")

// Upload backuped secret
latestSecret, err := kubeClient.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
latestSecret, err := clientset.CoreV1().Secrets("openshift-config").Get("pull-secret", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
resourceVersion := latestSecret.GetResourceVersion()
pullSecret.SetResourceVersion(resourceVersion) // need to update the version, otherwise operation is not permitted

_, err = kubeClient.CoreV1().Secrets("openshift-config").Update(pullSecret)
errConfig := wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) {
objs := map[string]interface{}{}
errUnmarshals := json.Unmarshal([]byte(pullSecret.Data[".dockerconfigjson"]), &objs)
if errUnmarshals != nil {
t.Fatal(errUnmarshal.Error())
}
for key := range objs["auths"].(map[string]interface{}) {
if key == "cloud.openshift.com" {
return true, nil
}
}
return false, nil
})
t.Log(errConfig)

newSecret, err := clientset.CoreV1().Secrets("openshift-config").Update(pullSecret)
if err != nil {
t.Fatal(err.Error())
}
t.Logf("%v\n", newSecret)

// Check if reports are uploaded - Logs show that insights-operator is enabled and reports are uploaded
RestartInsightsOperator(t)
CheckPodsLogs(t, kubeClient, "Successfully reported")
restartInsightsOperator(t)
errDisabled := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
insightsDisabled := isOperatorDisabled(t, clusterOperatorInsights())
if insightsDisabled {
return false, nil
}
return true, nil
})
t.Log(errDisabled)
checkPodsLogs(t, clientset, "Successfully reported")
}
65 changes: 62 additions & 3 deletions test/integration/bugs_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,23 +2,82 @@ package integration

import (
"testing"
"time"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
)

// https://bugzilla.redhat.com/show_bug.cgi?id=1750665
func TestDefaultUploadFrequency(t *testing.T) {
// delete any existing overriding secret
err := kubeClient.CoreV1().Secrets("openshift-config").Delete("support", &metav1.DeleteOptions{})
err := clientset.CoreV1().Secrets("openshift-config").Delete("support", &metav1.DeleteOptions{})

// if the secret is not found, continue, not a problem
if err != nil && err.Error() != `secrets "support" not found` {
t.Fatal(err.Error())
}

// restart insights-operator (delete pods)
RestartInsightsOperator(t)
restartInsightsOperator(t)

// check logs for "Gathering cluster info every 2h0m0s"
CheckPodsLogs(t, kubeClient, "Gathering cluster info every 2h0m0s")
checkPodsLogs(t, clientset, "Gathering cluster info every 2h0m0s")
}

// TestUnreachableHost checks if insights operator reports "degraded" after 5 unsuccessful upload attempts
// https://bugzilla.redhat.com/show_bug.cgi?id=1745973
func TestUnreachableHost(t *testing.T) {
	// Replace the endpoint with an unreachable URL:
	// oc -n openshift-config create secret generic support --from-literal=endpoint=http://localhost --dry-run -o yaml | oc apply -f - -n openshift-config
	modifiedSecret := corev1.Secret{
		TypeMeta: metav1.TypeMeta{},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "support",
			Namespace: "openshift-config",
		},
		Data: map[string][]byte{
			"endpoint": []byte("http://localhost"),
			"interval": []byte("3m"), // short interval so the 5 failed uploads happen quickly
		},
		Type: "Opaque",
	}
	// Delete any existing overriding secret first.
	err := clientset.CoreV1().Secrets("openshift-config").Delete("support", &metav1.DeleteOptions{})

	// A missing secret is fine — only fail on unexpected errors.
	if err != nil && err.Error() != `secrets "support" not found` {
		t.Fatal(err.Error())
	}
	if _, err = clientset.CoreV1().Secrets("openshift-config").Create(&modifiedSecret); err != nil {
		t.Fatal(err.Error())
	}
	// Restart insights-operator so it picks up the new endpoint:
	// oc delete pods --namespace=openshift-insights --all
	restartInsightsOperator(t)

	// The operator logs this once the upload failure threshold is exceeded.
	checkPodsLogs(t, clientset, "exceeded than threshold 5. Marking as degraded.")

	// The cluster operator must now report the Degraded condition.
	if !isOperatorDegraded(t, clusterOperatorInsights()) {
		t.Fatal("Insights is not degraded")
	}
	// Remove the override so the operator can recover.
	err = clientset.CoreV1().Secrets("openshift-config").Delete("support", &metav1.DeleteOptions{})
	if err != nil {
		t.Fatal(err.Error())
	}
	// Wait until the operator is no longer degraded. Previously a poll
	// timeout here was only logged, which let the test pass even when the
	// operator never recovered — fail explicitly instead.
	errDegraded := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
		return !isOperatorDegraded(t, clusterOperatorInsights()), nil
	})
	if errDegraded != nil {
		t.Fatalf("operator is still degraded after removing the support secret: %v", errDegraded)
	}
}
87 changes: 75 additions & 12 deletions test/integration/main_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,16 +9,19 @@ import (
"testing"
"time"

configv1 "github.com/openshift/api/config/v1"
configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
)

var kubeClient = KubeClient()
var clientset = kubeClient()
var configClient = configV1Client()

func KubeClient() (result *kubernetes.Clientset) {
func kubeClient() (result *kubernetes.Clientset) {
kubeconfig, ok := os.LookupEnv("KUBECONFIG") // variable is a path to the local kubeconfig
if !ok {
fmt.Printf("kubeconfig variable is not set\n")
Expand All @@ -32,24 +35,84 @@ func KubeClient() (result *kubernetes.Clientset) {
os.Exit(1)
}

kubeClient, err := kubernetes.NewForConfig(config)
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
return kubeClient
return clientset
}

func RestartInsightsOperator(t *testing.T) {
// configV1Client builds an OpenShift config.openshift.io/v1 client from the
// kubeconfig referenced by the KUBECONFIG environment variable (falling back
// to the default loading rules when it is unset).
func configV1Client() (result *configv1client.ConfigV1Client) {
	// KUBECONFIG is expected to point at a local kubeconfig file.
	kubeconfig, ok := os.LookupEnv("KUBECONFIG")
	if ok {
		fmt.Printf("KUBECONFIG=%s\n", kubeconfig)
	} else {
		fmt.Printf("kubeconfig variable is not set\n")
	}

	// Use the current context from that kubeconfig.
	restConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		fmt.Printf("%#v", err)
		os.Exit(1)
	}

	client, err := configv1client.NewForConfig(restConfig)
	if err != nil {
		panic(err.Error())
	}
	return client
}

// clusterOperatorInsights fetches the "insights" ClusterOperator resource,
// panicking on any API error (test-suite helper; no caller handles errors).
func clusterOperatorInsights() *configv1.ClusterOperator {
	co, err := configClient.ClusterOperators().Get("insights", metav1.GetOptions{})
	if err != nil {
		panic(err.Error())
	}
	return co
}

// isOperatorDegraded reports whether the operator's status carries a
// "Degraded" condition with status "True".
//
// Fix: the original fell through to `return true` when no "Degraded"
// condition was present at all; absence of the condition means the operator
// is not degraded.
func isOperatorDegraded(t *testing.T, operator *configv1.ClusterOperator) bool {
	for _, condition := range operator.Status.Conditions {
		if condition.Type == "Degraded" {
			if condition.Status == "True" {
				t.Log("Operator is degraded")
				return true
			}
			t.Log("Operator is not degraded")
			return false
		}
	}
	t.Log("Operator is not degraded (no Degraded condition reported)")
	return false
}

// isOperatorDisabled reports whether the operator's status carries a
// "Disabled" condition with status "True".
//
// Fix: the original fell through to `return true` when no "Disabled"
// condition was present at all, which made callers polling for
// "not disabled" spin until their timeout; absence of the condition means
// the operator is not disabled.
func isOperatorDisabled(t *testing.T, operator *configv1.ClusterOperator) bool {
	for _, condition := range operator.Status.Conditions {
		if condition.Type == "Disabled" {
			if condition.Status == "True" {
				t.Log("Operator is disabled")
				return true
			}
			t.Log("Operator is not Disabled")
			return false
		}
	}
	t.Log("Operator is not Disabled (no Disabled condition reported)")
	return false
}

func restartInsightsOperator(t *testing.T) {
// restart insights-operator (delete pods)
pods, err := kubeClient.CoreV1().Pods("openshift-insights").List(metav1.ListOptions{})
pods, err := clientset.CoreV1().Pods("openshift-insights").List(metav1.ListOptions{})
if err != nil {
t.Fatal(err.Error())
}

for _, pod := range pods.Items {
kubeClient.CoreV1().Pods("openshift-insights").Delete(pod.Name, &metav1.DeleteOptions{})
clientset.CoreV1().Pods("openshift-insights").Delete(pod.Name, &metav1.DeleteOptions{})
err := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
_, err := kubeClient.CoreV1().Pods("openshift-insights").Get(pod.Name, metav1.GetOptions{})
_, err := clientset.CoreV1().Pods("openshift-insights").Get(pod.Name, metav1.GetOptions{})
if err == nil {
t.Logf("the pod is not yet deleted: %v\n", err)
return false, nil
Expand All @@ -62,14 +125,14 @@ func RestartInsightsOperator(t *testing.T) {

// check new pods are created and running
errPod := wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
newPods, _ := kubeClient.CoreV1().Pods("openshift-insights").List(metav1.ListOptions{})
newPods, _ := clientset.CoreV1().Pods("openshift-insights").List(metav1.ListOptions{})
if len(newPods.Items) == 0 {
t.Log("pods are not yet created")
return false, nil
}

for _, newPod := range newPods.Items {
pod, err := kubeClient.CoreV1().Pods("openshift-insights").Get(newPod.Name, metav1.GetOptions{})
pod, err := clientset.CoreV1().Pods("openshift-insights").Get(newPod.Name, metav1.GetOptions{})
if err != nil {
panic(err.Error())
}
Expand All @@ -84,7 +147,7 @@ func RestartInsightsOperator(t *testing.T) {
t.Log(errPod)
}

func CheckPodsLogs(t *testing.T, kubeClient *kubernetes.Clientset, message string) {
func checkPodsLogs(t *testing.T, kubeClient *kubernetes.Clientset, message string) {
newPods, err := kubeClient.CoreV1().Pods("openshift-insights").List(metav1.ListOptions{})
if err != nil {
t.Fatal(err.Error())
Expand Down Expand Up @@ -128,7 +191,7 @@ func CheckPodsLogs(t *testing.T, kubeClient *kubernetes.Clientset, message strin

func TestMain(m *testing.M) {
// check the operator is up
err := waitForOperator(kubeClient)
err := waitForOperator(clientset)
if err != nil {
fmt.Println("failed waiting for operator to start")
os.Exit(1)
Expand Down

0 comments on commit 5e9ed4d

Please sign in to comment.